2024-11-17 22:45:59,824 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-17 22:45:59,838 main DEBUG Took 0.011998 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-17 22:45:59,839 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-17 22:45:59,839 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-17 22:45:59,841 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-17 22:45:59,843 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 22:45:59,855 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-17 22:45:59,871 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 22:45:59,873 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 22:45:59,874 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 22:45:59,875 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 22:45:59,876 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 22:45:59,876 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 22:45:59,877 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 22:45:59,878 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 22:45:59,878 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 22:45:59,879 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 22:45:59,879 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 22:45:59,880 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 22:45:59,880 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 22:45:59,881 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-17 22:45:59,881 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 22:45:59,881 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 22:45:59,882 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 22:45:59,882 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 22:45:59,883 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 22:45:59,883 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 22:45:59,883 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 22:45:59,884 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 22:45:59,884 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 22:45:59,884 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 22:45:59,885 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 22:45:59,885 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-17 22:45:59,886 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 22:45:59,888 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-17 22:45:59,890 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-17 22:45:59,891 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-17 22:45:59,893 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-17 22:45:59,894 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-17 22:45:59,906 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-17 22:45:59,910 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-17 22:45:59,912 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-17 22:45:59,913 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-17 22:45:59,913 main DEBUG createAppenders(={Console}) 2024-11-17 22:45:59,914 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-17 22:45:59,915 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-17 22:45:59,915 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-17 22:45:59,916 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-17 22:45:59,917 main DEBUG OutputStream closed 2024-11-17 22:45:59,917 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-17 22:45:59,917 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-17 22:45:59,918 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-17 22:46:00,005 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-17 22:46:00,007 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-17 22:46:00,008 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-17 22:46:00,009 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-17 22:46:00,010 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-17 22:46:00,011 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-17 22:46:00,011 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-17 22:46:00,011 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-17 22:46:00,012 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-17 22:46:00,012 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-17 22:46:00,013 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-17 22:46:00,013 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-17 22:46:00,014 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-17 22:46:00,015 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-17 22:46:00,015 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-17 22:46:00,016 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-17 22:46:00,016 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-17 22:46:00,018 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-17 22:46:00,020 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-17 22:46:00,021 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-17 22:46:00,021 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-17 22:46:00,022 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-17T22:46:00,301 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4 2024-11-17 22:46:00,304 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-17 22:46:00,305 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-17T22:46:00,316 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-17T22:46:00,357 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=318, ProcessCount=11, AvailableMemoryMB=5502 2024-11-17T22:46:00,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-17T22:46:00,383 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/cluster_d3b06ad8-e144-68c8-0018-2b0b8315e99f, deleteOnExit=true 2024-11-17T22:46:00,384 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-17T22:46:00,385 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/test.cache.data in system properties and HBase conf 2024-11-17T22:46:00,386 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/hadoop.tmp.dir in system properties and HBase conf 2024-11-17T22:46:00,387 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/hadoop.log.dir in system properties and HBase conf 2024-11-17T22:46:00,388 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-17T22:46:00,389 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-17T22:46:00,389 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-17T22:46:00,494 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-17T22:46:00,617 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-17T22:46:00,624 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-17T22:46:00,625 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-17T22:46:00,626 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-17T22:46:00,627 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T22:46:00,628 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-17T22:46:00,628 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-17T22:46:00,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T22:46:00,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T22:46:00,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-17T22:46:00,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/nfs.dump.dir in system properties and HBase conf 2024-11-17T22:46:00,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/java.io.tmpdir in system properties and HBase conf 2024-11-17T22:46:00,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T22:46:00,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-17T22:46:00,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-17T22:46:01,227 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T22:46:01,595 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-17T22:46:01,695 INFO [Time-limited test {}] log.Log(170): Logging initialized @2719ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-17T22:46:01,792 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:46:01,865 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T22:46:01,885 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T22:46:01,885 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T22:46:01,887 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T22:46:01,900 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:46:01,903 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/hadoop.log.dir/,AVAILABLE} 2024-11-17T22:46:01,904 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T22:46:02,134 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/java.io.tmpdir/jetty-localhost-44679-hadoop-hdfs-3_4_1-tests_jar-_-any-16289200000076160210/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T22:46:02,143 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:44679} 2024-11-17T22:46:02,143 INFO [Time-limited test {}] server.Server(415): Started @3167ms 2024-11-17T22:46:02,177 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T22:46:02,577 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:46:02,585 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T22:46:02,591 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T22:46:02,591 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T22:46:02,591 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T22:46:02,593 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6355b7f5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/hadoop.log.dir/,AVAILABLE} 2024-11-17T22:46:02,594 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60d13ec7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T22:46:02,735 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6c2fdbac{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/java.io.tmpdir/jetty-localhost-33467-hadoop-hdfs-3_4_1-tests_jar-_-any-14648636742918734040/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:46:02,736 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@461c65fd{HTTP/1.1, (http/1.1)}{localhost:33467} 2024-11-17T22:46:02,737 INFO [Time-limited test {}] server.Server(415): Started @3761ms 2024-11-17T22:46:02,807 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T22:46:02,949 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:46:02,958 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T22:46:02,966 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T22:46:02,966 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T22:46:02,966 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T22:46:02,968 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3369fbc0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/hadoop.log.dir/,AVAILABLE} 2024-11-17T22:46:02,969 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ec7bf2e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T22:46:03,085 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1467625d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/java.io.tmpdir/jetty-localhost-33897-hadoop-hdfs-3_4_1-tests_jar-_-any-4710449743503695025/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:46:03,086 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@675921ed{HTTP/1.1, (http/1.1)}{localhost:33897} 2024-11-17T22:46:03,086 INFO [Time-limited test {}] server.Server(415): Started @4110ms 2024-11-17T22:46:03,089 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-17T22:46:03,265 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/cluster_d3b06ad8-e144-68c8-0018-2b0b8315e99f/data/data1/current/BP-1157280779-172.17.0.2-1731883561327/current, will proceed with Du for space computation calculation, 2024-11-17T22:46:03,267 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/cluster_d3b06ad8-e144-68c8-0018-2b0b8315e99f/data/data3/current/BP-1157280779-172.17.0.2-1731883561327/current, will proceed with Du for space computation calculation, 2024-11-17T22:46:03,270 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/cluster_d3b06ad8-e144-68c8-0018-2b0b8315e99f/data/data2/current/BP-1157280779-172.17.0.2-1731883561327/current, will proceed with Du for space computation calculation, 2024-11-17T22:46:03,277 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/cluster_d3b06ad8-e144-68c8-0018-2b0b8315e99f/data/data4/current/BP-1157280779-172.17.0.2-1731883561327/current, will proceed with Du for space computation calculation, 2024-11-17T22:46:03,352 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-17T22:46:03,358 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T22:46:03,436 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8a0ed76f4d3d70b1 with lease ID 0xcbbb8f04c021f381: Processing first storage report for DS-b3d6d1ad-43ba-4cfd-bfa1-922e31f221ac from datanode DatanodeRegistration(127.0.0.1:43717, datanodeUuid=b6c6a6f3-efec-4b27-9acc-82e43e164a85, infoPort=34647, infoSecurePort=0, ipcPort=44637, storageInfo=lv=-57;cid=testClusterID;nsid=1422209140;c=1731883561327) 2024-11-17T22:46:03,438 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8a0ed76f4d3d70b1 with lease ID 0xcbbb8f04c021f381: from storage DS-b3d6d1ad-43ba-4cfd-bfa1-922e31f221ac node DatanodeRegistration(127.0.0.1:43717, datanodeUuid=b6c6a6f3-efec-4b27-9acc-82e43e164a85, infoPort=34647, infoSecurePort=0, ipcPort=44637, storageInfo=lv=-57;cid=testClusterID;nsid=1422209140;c=1731883561327), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-17T22:46:03,439 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x17503bf1b8c06ffb with lease ID 0xcbbb8f04c021f382: Processing first storage report for DS-5fe2b393-6f38-48da-812f-2a44bebea2ce from datanode DatanodeRegistration(127.0.0.1:37031, datanodeUuid=c098af76-f143-4cba-acce-c27b646b038b, infoPort=38333, infoSecurePort=0, ipcPort=39437, storageInfo=lv=-57;cid=testClusterID;nsid=1422209140;c=1731883561327) 2024-11-17T22:46:03,439 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x17503bf1b8c06ffb with lease ID 0xcbbb8f04c021f382: from storage DS-5fe2b393-6f38-48da-812f-2a44bebea2ce node DatanodeRegistration(127.0.0.1:37031, datanodeUuid=c098af76-f143-4cba-acce-c27b646b038b, infoPort=38333, infoSecurePort=0, ipcPort=39437, storageInfo=lv=-57;cid=testClusterID;nsid=1422209140;c=1731883561327), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-17T22:46:03,439 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8a0ed76f4d3d70b1 with lease ID 0xcbbb8f04c021f381: Processing first storage report for DS-e28170fa-0bad-4d05-bf7f-9fe392832953 from datanode DatanodeRegistration(127.0.0.1:43717, datanodeUuid=b6c6a6f3-efec-4b27-9acc-82e43e164a85, infoPort=34647, infoSecurePort=0, ipcPort=44637, storageInfo=lv=-57;cid=testClusterID;nsid=1422209140;c=1731883561327) 2024-11-17T22:46:03,439 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8a0ed76f4d3d70b1 with lease ID 0xcbbb8f04c021f381: from storage DS-e28170fa-0bad-4d05-bf7f-9fe392832953 node DatanodeRegistration(127.0.0.1:43717, datanodeUuid=b6c6a6f3-efec-4b27-9acc-82e43e164a85, infoPort=34647, infoSecurePort=0, ipcPort=44637, storageInfo=lv=-57;cid=testClusterID;nsid=1422209140;c=1731883561327), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:46:03,440 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x17503bf1b8c06ffb with lease ID 0xcbbb8f04c021f382: Processing first storage report for DS-603a2b2c-e626-44b8-89d0-cda31a089d46 from datanode DatanodeRegistration(127.0.0.1:37031, datanodeUuid=c098af76-f143-4cba-acce-c27b646b038b, infoPort=38333, infoSecurePort=0, ipcPort=39437, storageInfo=lv=-57;cid=testClusterID;nsid=1422209140;c=1731883561327) 2024-11-17T22:46:03,440 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x17503bf1b8c06ffb with lease ID 0xcbbb8f04c021f382: from storage DS-603a2b2c-e626-44b8-89d0-cda31a089d46 node DatanodeRegistration(127.0.0.1:37031, datanodeUuid=c098af76-f143-4cba-acce-c27b646b038b, infoPort=38333, infoSecurePort=0, ipcPort=39437, storageInfo=lv=-57;cid=testClusterID;nsid=1422209140;c=1731883561327), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-17T22:46:03,505 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4 2024-11-17T22:46:03,588 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/cluster_d3b06ad8-e144-68c8-0018-2b0b8315e99f/zookeeper_0, clientPort=63688, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/cluster_d3b06ad8-e144-68c8-0018-2b0b8315e99f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/cluster_d3b06ad8-e144-68c8-0018-2b0b8315e99f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-17T22:46:03,602 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63688 2024-11-17T22:46:03,618 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:46:03,625 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:46:03,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741825_1001 (size=7) 2024-11-17T22:46:03,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741825_1001 (size=7) 2024-11-17T22:46:04,326 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c with version=8 2024-11-17T22:46:04,326 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/hbase-staging 2024-11-17T22:46:04,417 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-17T22:46:04,667 INFO [Time-limited test {}] client.ConnectionUtils(128): master/1a6e40b21a48:0 server-side Connection retries=45 2024-11-17T22:46:04,677 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T22:46:04,677 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T22:46:04,682 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T22:46:04,682 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T22:46:04,682 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T22:46:04,841 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-17T22:46:04,907 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-17T22:46:04,919 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-17T22:46:04,923 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T22:46:04,951 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 64073 (auto-detected) 2024-11-17T22:46:04,952 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-17T22:46:04,978 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42095 2024-11-17T22:46:05,000 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42095 connecting to ZooKeeper ensemble=127.0.0.1:63688 2024-11-17T22:46:05,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:420950x0, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T22:46:05,039 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42095-0x1004fdd569d0000 connected 2024-11-17T22:46:05,078 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:46:05,082 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:46:05,096 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42095-0x1004fdd569d0000, quorum=127.0.0.1:63688, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T22:46:05,102 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c, hbase.cluster.distributed=false 2024-11-17T22:46:05,132 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42095-0x1004fdd569d0000, quorum=127.0.0.1:63688, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T22:46:05,140 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42095 
2024-11-17T22:46:05,141 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42095 2024-11-17T22:46:05,142 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42095 2024-11-17T22:46:05,149 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42095 2024-11-17T22:46:05,153 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42095 2024-11-17T22:46:05,281 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1a6e40b21a48:0 server-side Connection retries=45 2024-11-17T22:46:05,283 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T22:46:05,283 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T22:46:05,284 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T22:46:05,284 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T22:46:05,284 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T22:46:05,288 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-17T22:46:05,292 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T22:46:05,293 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34025 2024-11-17T22:46:05,295 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34025 connecting to ZooKeeper ensemble=127.0.0.1:63688 2024-11-17T22:46:05,297 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:46:05,303 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:46:05,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:340250x0, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T22:46:05,312 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:340250x0, quorum=127.0.0.1:63688, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T22:46:05,313 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): 
regionserver:34025-0x1004fdd569d0001 connected 2024-11-17T22:46:05,319 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-17T22:46:05,329 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-17T22:46:05,331 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34025-0x1004fdd569d0001, quorum=127.0.0.1:63688, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-17T22:46:05,337 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34025-0x1004fdd569d0001, quorum=127.0.0.1:63688, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T22:46:05,339 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34025 2024-11-17T22:46:05,340 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34025 2024-11-17T22:46:05,341 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34025 2024-11-17T22:46:05,343 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34025 2024-11-17T22:46:05,345 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34025 2024-11-17T22:46:05,367 DEBUG [M:0;1a6e40b21a48:42095 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;1a6e40b21a48:42095 2024-11-17T22:46:05,369 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/1a6e40b21a48,42095,1731883564467 2024-11-17T22:46:05,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34025-0x1004fdd569d0001, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T22:46:05,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42095-0x1004fdd569d0000, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T22:46:05,379 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42095-0x1004fdd569d0000, quorum=127.0.0.1:63688, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/1a6e40b21a48,42095,1731883564467 2024-11-17T22:46:05,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34025-0x1004fdd569d0001, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-17T22:46:05,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34025-0x1004fdd569d0001, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:46:05,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42095-0x1004fdd569d0000, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:46:05,402 DEBUG 
[master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42095-0x1004fdd569d0000, quorum=127.0.0.1:63688, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-17T22:46:05,403 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/1a6e40b21a48,42095,1731883564467 from backup master directory 2024-11-17T22:46:05,407 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34025-0x1004fdd569d0001, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T22:46:05,407 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42095-0x1004fdd569d0000, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/1a6e40b21a48,42095,1731883564467 2024-11-17T22:46:05,407 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42095-0x1004fdd569d0000, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T22:46:05,408 WARN [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-17T22:46:05,408 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=1a6e40b21a48,42095,1731883564467 2024-11-17T22:46:05,411 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-17T22:46:05,412 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-17T22:46:05,490 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/hbase.id] with ID: d76533c3-3d0d-4ae2-86bd-a0a58a41ae09 2024-11-17T22:46:05,490 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/.tmp/hbase.id 2024-11-17T22:46:05,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741826_1002 (size=42) 2024-11-17T22:46:05,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741826_1002 (size=42) 2024-11-17T22:46:05,505 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/.tmp/hbase.id]:[hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/hbase.id] 2024-11-17T22:46:05,560 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:46:05,566 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching 
table descriptors from the filesystem. 2024-11-17T22:46:05,588 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 19ms. 2024-11-17T22:46:05,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42095-0x1004fdd569d0000, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:46:05,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34025-0x1004fdd569d0001, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:46:05,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741827_1003 (size=196) 2024-11-17T22:46:05,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741827_1003 (size=196) 2024-11-17T22:46:05,627 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T22:46:05,629 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-17T22:46:05,634 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T22:46:05,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741828_1004 (size=1189) 2024-11-17T22:46:05,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741828_1004 (size=1189) 2024-11-17T22:46:05,693 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/MasterData/data/master/store 2024-11-17T22:46:05,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741829_1005 (size=34) 2024-11-17T22:46:05,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741829_1005 (size=34) 2024-11-17T22:46:05,721 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-17T22:46:05,725 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:46:05,727 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T22:46:05,727 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:46:05,727 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:46:05,730 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T22:46:05,730 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:46:05,731 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-17T22:46:05,732 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731883565727Disabling compacts and flushes for region at 1731883565727Disabling writes for close at 1731883565730 (+3 ms)Writing region close event to WAL at 1731883565731 (+1 ms)Closed at 1731883565731 2024-11-17T22:46:05,736 WARN [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/MasterData/data/master/store/.initializing 2024-11-17T22:46:05,736 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/MasterData/WALs/1a6e40b21a48,42095,1731883564467 2024-11-17T22:46:05,760 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a6e40b21a48%2C42095%2C1731883564467, suffix=, logDir=hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/MasterData/WALs/1a6e40b21a48,42095,1731883564467, archiveDir=hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/MasterData/oldWALs, maxLogs=10 2024-11-17T22:46:05,770 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C42095%2C1731883564467.1731883565765 2024-11-17T22:46:05,795 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/MasterData/WALs/1a6e40b21a48,42095,1731883564467/1a6e40b21a48%2C42095%2C1731883564467.1731883565765 2024-11-17T22:46:05,811 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38333:38333),(127.0.0.1/127.0.0.1:34647:34647)] 2024-11-17T22:46:05,813 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-17T22:46:05,814 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:46:05,817 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:46:05,819 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:46:05,858 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:46:05,891 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-17T22:46:05,895 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:46:05,899 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:46:05,900 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:46:05,903 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-17T22:46:05,903 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:46:05,904 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T22:46:05,905 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:46:05,908 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: 
max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-17T22:46:05,908 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:46:05,909 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T22:46:05,909 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:46:05,912 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-17T22:46:05,912 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:46:05,914 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T22:46:05,914 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:46:05,918 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:46:05,920 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:46:05,926 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:46:05,927 DEBUG 
[master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:46:05,932 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-17T22:46:05,937 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:46:05,943 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T22:46:05,944 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=768951, jitterRate=-0.022228404879570007}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-17T22:46:05,952 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731883565832Initializing all the Stores at 1731883565834 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883565834Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883565835 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883565835Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883565835Cleaning up temporary data from old regions at 1731883565927 (+92 ms)Region opened successfully at 1731883565952 (+25 ms) 2024-11-17T22:46:05,954 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-17T22:46:05,994 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5697b76b, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a6e40b21a48/172.17.0.2:0 2024-11-17T22:46:06,027 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-17T22:46:06,039 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-17T22:46:06,039 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-17T22:46:06,042 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-17T22:46:06,043 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-17T22:46:06,047 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-17T22:46:06,048 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-17T22:46:06,077 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-17T22:46:06,088 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42095-0x1004fdd569d0000, quorum=127.0.0.1:63688, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-17T22:46:06,091 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-17T22:46:06,094 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-17T22:46:06,096 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42095-0x1004fdd569d0000, quorum=127.0.0.1:63688, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-17T22:46:06,098 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-17T22:46:06,100 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-17T22:46:06,106 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42095-0x1004fdd569d0000, quorum=127.0.0.1:63688, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-17T22:46:06,107 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-17T22:46:06,109 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42095-0x1004fdd569d0000, quorum=127.0.0.1:63688, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily 
an error) 2024-11-17T22:46:06,110 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-17T22:46:06,131 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42095-0x1004fdd569d0000, quorum=127.0.0.1:63688, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-17T22:46:06,132 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-17T22:46:06,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34025-0x1004fdd569d0001, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T22:46:06,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42095-0x1004fdd569d0000, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T22:46:06,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42095-0x1004fdd569d0000, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:46:06,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34025-0x1004fdd569d0001, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:46:06,140 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=1a6e40b21a48,42095,1731883564467, sessionid=0x1004fdd569d0000, setting cluster-up flag (Was=false) 2024-11-17T22:46:06,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42095-0x1004fdd569d0000, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:46:06,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34025-0x1004fdd569d0001, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:46:06,157 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-17T22:46:06,160 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1a6e40b21a48,42095,1731883564467 2024-11-17T22:46:06,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34025-0x1004fdd569d0001, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:46:06,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42095-0x1004fdd569d0000, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:46:06,173 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-17T22:46:06,175 DEBUG 
[master/1a6e40b21a48:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1a6e40b21a48,42095,1731883564467 2024-11-17T22:46:06,183 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-17T22:46:06,251 INFO [RS:0;1a6e40b21a48:34025 {}] regionserver.HRegionServer(746): ClusterId : d76533c3-3d0d-4ae2-86bd-a0a58a41ae09 2024-11-17T22:46:06,255 DEBUG [RS:0;1a6e40b21a48:34025 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-17T22:46:06,260 DEBUG [RS:0;1a6e40b21a48:34025 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-17T22:46:06,260 DEBUG [RS:0;1a6e40b21a48:34025 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-17T22:46:06,264 DEBUG [RS:0;1a6e40b21a48:34025 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-17T22:46:06,264 DEBUG [RS:0;1a6e40b21a48:34025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@669ccc97, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a6e40b21a48/172.17.0.2:0 2024-11-17T22:46:06,265 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-17T22:46:06,274 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-17T22:46:06,279 DEBUG [RS:0;1a6e40b21a48:34025 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;1a6e40b21a48:34025 2024-11-17T22:46:06,282 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-17T22:46:06,284 INFO [RS:0;1a6e40b21a48:34025 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-17T22:46:06,284 INFO [RS:0;1a6e40b21a48:34025 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-17T22:46:06,285 DEBUG [RS:0;1a6e40b21a48:34025 {}] regionserver.HRegionServer(832): About to register with Master. 
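The repeated ZKUtil(444) / RecoverableZooKeeper(212) lines above are the master probing switch znodes (/hbase/balancer, /hbase/normalizer, /hbase/switch/*, /hbase/snapshot-cleanup) that have not been created yet; absence simply means the default state applies, hence "not necessarily an error". HBase wraps ZooKeeper access in ZKWatcher/RecoverableZooKeeper, but the probe itself amounts to the plain ZooKeeper client calls sketched below, using the quorum string from the log; the retry and error handling are deliberately simplified.

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class ZnodeProbeSketch {
  public static void main(String[] args) throws Exception {
    // Quorum and base znode as logged: 127.0.0.1:63688, baseZNode=/hbase.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:63688", 30_000, event -> { });
    try {
      String path = "/hbase/balancer";
      Stat stat = zk.exists(path, false);   // null when the node does not exist
      if (stat == null) {
        // Matches the log: the znode is missing, so the default switch state is used.
        System.out.println(path + " missing, assuming default switch state");
      } else {
        byte[] data = zk.getData(path, false, stat);
        System.out.println(path + " = " + new String(data));
      }
    } catch (KeeperException e) {
      // Real code retries via RecoverableZooKeeper; a sketch just reports the failure.
      e.printStackTrace();
    } finally {
      zk.close();
    }
  }
}
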
2024-11-17T22:46:06,287 INFO [RS:0;1a6e40b21a48:34025 {}] regionserver.HRegionServer(2659): reportForDuty to master=1a6e40b21a48,42095,1731883564467 with port=34025, startcode=1731883565240 2024-11-17T22:46:06,288 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 1a6e40b21a48,42095,1731883564467 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-17T22:46:06,295 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/1a6e40b21a48:0, corePoolSize=5, maxPoolSize=5 2024-11-17T22:46:06,296 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/1a6e40b21a48:0, corePoolSize=5, maxPoolSize=5 2024-11-17T22:46:06,296 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=5, maxPoolSize=5 2024-11-17T22:46:06,296 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=5, maxPoolSize=5 2024-11-17T22:46:06,296 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/1a6e40b21a48:0, corePoolSize=10, maxPoolSize=10 2024-11-17T22:46:06,296 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:46:06,297 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=2, maxPoolSize=2 2024-11-17T22:46:06,297 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:46:06,300 DEBUG [RS:0;1a6e40b21a48:34025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-17T22:46:06,311 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T22:46:06,311 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-17T22:46:06,318 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:46:06,318 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', 
BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-17T22:46:06,322 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731883596322 2024-11-17T22:46:06,324 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-17T22:46:06,326 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-17T22:46:06,332 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-17T22:46:06,333 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-17T22:46:06,334 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-17T22:46:06,334 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-17T22:46:06,335 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
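The cleaner chores being initialized above (TimeToLiveLogCleaner, ReplicationLogCleaner, HFileLinkCleaner, SnapshotHFileCleaner, and so on) are pluggable and are wired in through configuration. The sketch below shows how such chains are commonly declared; the key names hbase.master.logcleaner.plugins and hbase.master.hfilecleaner.plugins are the standard ones and are assumed here rather than taken from this test's configuration, which the log does not print.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CleanerChainSketch {
  public static Configuration cleanerConf() {
    Configuration conf = HBaseConfiguration.create();
    // WAL/log cleaner chain, mirroring the classes the log shows being initialized.
    conf.set("hbase.master.logcleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
      + "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner,"
      + "org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner");
    // HFile cleaner chain.
    conf.set("hbase.master.hfilecleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner,"
      + "org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner,"
      + "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner");
    return conf;
  }
}
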
2024-11-17T22:46:06,342 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-17T22:46:06,344 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-17T22:46:06,344 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-17T22:46:06,349 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-17T22:46:06,350 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-17T22:46:06,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741831_1007 (size=1321) 2024-11-17T22:46:06,357 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.large.0-1731883566351,5,FailOnTimeoutGroup] 2024-11-17T22:46:06,361 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.small.0-1731883566358,5,FailOnTimeoutGroup] 2024-11-17T22:46:06,361 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-17T22:46:06,361 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-17T22:46:06,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741831_1007 (size=1321) 2024-11-17T22:46:06,365 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-17T22:46:06,365 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
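Every "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." line comes from ChoreService accepting a periodic task. ScheduledChore and ChoreService are internal (IA.Private) classes, so the minimal sketch below only assumes the constructor forms visible in the current source tree; it is illustrative, not a supported API.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  /** Trivially satisfies Stoppable so the chore can be scheduled stand-alone. */
  static final class NeverStop implements Stoppable {
    private volatile boolean stopped;
    @Override public void stop(String why) { stopped = true; }
    @Override public boolean isStopped() { return stopped; }
  }

  public static void main(String[] args) {
    Stoppable stopper = new NeverStop();
    // Period in milliseconds, matching the "period=..., unit=MILLISECONDS" log form.
    ScheduledChore demo = new ScheduledChore("DemoChore", stopper, 600_000) {
      @Override protected void chore() {
        System.out.println("periodic work goes here");
      }
    };
    ChoreService service = new ChoreService("demo");
    service.scheduleChore(demo);   // ChoreService logs the "... is enabled." line when a chore is accepted
    // service.shutdown() would cancel all scheduled chores.
  }
}
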
2024-11-17T22:46:06,366 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-17T22:46:06,366 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c 2024-11-17T22:46:06,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741832_1008 (size=32) 2024-11-17T22:46:06,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741832_1008 (size=32) 2024-11-17T22:46:06,388 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47999, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-17T22:46:06,395 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42095 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1a6e40b21a48,34025,1731883565240 2024-11-17T22:46:06,397 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42095 {}] master.ServerManager(517): Registering regionserver=1a6e40b21a48,34025,1731883565240 2024-11-17T22:46:06,410 DEBUG [RS:0;1a6e40b21a48:34025 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c 2024-11-17T22:46:06,411 DEBUG [RS:0;1a6e40b21a48:34025 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40071 2024-11-17T22:46:06,411 DEBUG [RS:0;1a6e40b21a48:34025 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-17T22:46:06,414 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): master:42095-0x1004fdd569d0000, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T22:46:06,415 DEBUG [RS:0;1a6e40b21a48:34025 {}] zookeeper.ZKUtil(111): regionserver:34025-0x1004fdd569d0001, quorum=127.0.0.1:63688, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1a6e40b21a48,34025,1731883565240 2024-11-17T22:46:06,415 WARN [RS:0;1a6e40b21a48:34025 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-17T22:46:06,415 INFO [RS:0;1a6e40b21a48:34025 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T22:46:06,416 DEBUG [RS:0;1a6e40b21a48:34025 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240 2024-11-17T22:46:06,419 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1a6e40b21a48,34025,1731883565240] 2024-11-17T22:46:06,457 INFO [RS:0;1a6e40b21a48:34025 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-17T22:46:06,480 INFO [RS:0;1a6e40b21a48:34025 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-17T22:46:06,487 INFO [RS:0;1a6e40b21a48:34025 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-17T22:46:06,487 INFO [RS:0;1a6e40b21a48:34025 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T22:46:06,489 INFO [RS:0;1a6e40b21a48:34025 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-17T22:46:06,496 INFO [RS:0;1a6e40b21a48:34025 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-17T22:46:06,498 INFO [RS:0;1a6e40b21a48:34025 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
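MemStoreFlusher(131) above reports globalMemStoreLimit=880 M and globalMemStoreLimitLowMark=836 M. Those two numbers follow from the heap size and the defaults of hbase.regionserver.global.memstore.size (0.4) and hbase.regionserver.global.memstore.size.lower.limit (0.95). The 2200 MB heap figure in the sketch below is inferred from 880 M / 0.4 and is an assumption, since the log does not state the region server heap size.

public class GlobalMemstoreLimitSketch {
  public static void main(String[] args) {
    // Heap is inferred: 880 MB / 0.4 = 2200 MB; the log does not report the heap size.
    long maxHeapMb = 2200;
    // Defaults for hbase.regionserver.global.memstore.size and
    // hbase.regionserver.global.memstore.size.lower.limit.
    double globalFraction = 0.4;
    double lowerLimitFraction = 0.95;

    long globalLimitMb = (long) (maxHeapMb * globalFraction);      // 880 MB
    long lowMarkMb = (long) (globalLimitMb * lowerLimitFraction);  // 836 MB

    System.out.printf("globalMemStoreLimit=%d M, globalMemStoreLimitLowMark=%d M%n",
        globalLimitMb, lowMarkMb);  // matches the MemStoreFlusher(131) line above
  }
}
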
2024-11-17T22:46:06,498 DEBUG [RS:0;1a6e40b21a48:34025 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:46:06,498 DEBUG [RS:0;1a6e40b21a48:34025 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:46:06,499 DEBUG [RS:0;1a6e40b21a48:34025 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:46:06,499 DEBUG [RS:0;1a6e40b21a48:34025 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:46:06,499 DEBUG [RS:0;1a6e40b21a48:34025 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:46:06,499 DEBUG [RS:0;1a6e40b21a48:34025 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1a6e40b21a48:0, corePoolSize=2, maxPoolSize=2 2024-11-17T22:46:06,499 DEBUG [RS:0;1a6e40b21a48:34025 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:46:06,500 DEBUG [RS:0;1a6e40b21a48:34025 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:46:06,500 DEBUG [RS:0;1a6e40b21a48:34025 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:46:06,500 DEBUG [RS:0;1a6e40b21a48:34025 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:46:06,500 DEBUG [RS:0;1a6e40b21a48:34025 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:46:06,500 DEBUG [RS:0;1a6e40b21a48:34025 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:46:06,501 DEBUG [RS:0;1a6e40b21a48:34025 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1a6e40b21a48:0, corePoolSize=3, maxPoolSize=3 2024-11-17T22:46:06,501 DEBUG [RS:0;1a6e40b21a48:34025 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0, corePoolSize=3, maxPoolSize=3 2024-11-17T22:46:06,502 INFO [RS:0;1a6e40b21a48:34025 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-17T22:46:06,502 INFO [RS:0;1a6e40b21a48:34025 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-17T22:46:06,502 INFO [RS:0;1a6e40b21a48:34025 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T22:46:06,503 INFO [RS:0;1a6e40b21a48:34025 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
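The executor.ExecutorService(95) lines above list named worker pools with corePoolSize/maxPoolSize pairs (RS_OPEN_REGION 1/1, RS_LOG_REPLAY_OPS 2/2, RS_SNAPSHOT_OPERATIONS 3/3, and so on). HBase's executor.ExecutorService is an internal wrapper around JDK thread pools; the plain-JDK sketch below only illustrates what a named pool with those two bounds and core-thread timeout looks like, and is not the HBase class itself.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class NamedPoolSketch {
  /** Builds a bounded, named pool analogous to one "Starting executor service" log line. */
  static ThreadPoolExecutor newPool(String name, int corePoolSize, int maxPoolSize) {
    AtomicInteger seq = new AtomicInteger();
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        corePoolSize, maxPoolSize, 60L, TimeUnit.SECONDS,
        new LinkedBlockingQueue<>(),                            // unbounded work queue
        r -> new Thread(r, name + "-" + seq.incrementAndGet())); // thread names carry the pool name
    pool.allowCoreThreadTimeOut(true);                          // idle core threads may exit
    return pool;
  }

  public static void main(String[] args) {
    // Pool names taken from two entries in the log, purely for illustration.
    ThreadPoolExecutor openRegion = newPool("RS_OPEN_REGION", 1, 1);
    ThreadPoolExecutor snapshotOps = newPool("RS_SNAPSHOT_OPERATIONS", 3, 3);
    openRegion.execute(() -> System.out.println("open-region task"));
    snapshotOps.execute(() -> System.out.println("snapshot task"));
    openRegion.shutdown();
    snapshotOps.shutdown();
  }
}

Note that with an unbounded queue the maximum pool size never grows past the core size; since every pool in the log uses corePoolSize == maxPoolSize, that distinction does not matter here.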
2024-11-17T22:46:06,503 INFO [RS:0;1a6e40b21a48:34025 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-17T22:46:06,503 INFO [RS:0;1a6e40b21a48:34025 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,34025,1731883565240-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T22:46:06,524 INFO [RS:0;1a6e40b21a48:34025 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-17T22:46:06,527 INFO [RS:0;1a6e40b21a48:34025 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,34025,1731883565240-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T22:46:06,528 INFO [RS:0;1a6e40b21a48:34025 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:46:06,528 INFO [RS:0;1a6e40b21a48:34025 {}] regionserver.Replication(171): 1a6e40b21a48,34025,1731883565240 started 2024-11-17T22:46:06,551 INFO [RS:0;1a6e40b21a48:34025 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:46:06,552 INFO [RS:0;1a6e40b21a48:34025 {}] regionserver.HRegionServer(1482): Serving as 1a6e40b21a48,34025,1731883565240, RpcServer on 1a6e40b21a48/172.17.0.2:34025, sessionid=0x1004fdd569d0001 2024-11-17T22:46:06,553 DEBUG [RS:0;1a6e40b21a48:34025 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-17T22:46:06,553 DEBUG [RS:0;1a6e40b21a48:34025 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1a6e40b21a48,34025,1731883565240 2024-11-17T22:46:06,553 DEBUG [RS:0;1a6e40b21a48:34025 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a6e40b21a48,34025,1731883565240' 2024-11-17T22:46:06,553 DEBUG [RS:0;1a6e40b21a48:34025 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-17T22:46:06,554 DEBUG [RS:0;1a6e40b21a48:34025 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-17T22:46:06,555 DEBUG [RS:0;1a6e40b21a48:34025 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-17T22:46:06,555 DEBUG [RS:0;1a6e40b21a48:34025 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-17T22:46:06,556 DEBUG [RS:0;1a6e40b21a48:34025 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1a6e40b21a48,34025,1731883565240 2024-11-17T22:46:06,556 DEBUG [RS:0;1a6e40b21a48:34025 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a6e40b21a48,34025,1731883565240' 2024-11-17T22:46:06,556 DEBUG [RS:0;1a6e40b21a48:34025 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-17T22:46:06,559 DEBUG [RS:0;1a6e40b21a48:34025 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-17T22:46:06,562 DEBUG [RS:0;1a6e40b21a48:34025 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-17T22:46:06,562 INFO [RS:0;1a6e40b21a48:34025 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-17T22:46:06,563 INFO [RS:0;1a6e40b21a48:34025 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-17T22:46:06,671 INFO [RS:0;1a6e40b21a48:34025 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a6e40b21a48%2C34025%2C1731883565240, suffix=, logDir=hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240, archiveDir=hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/oldWALs, maxLogs=32 2024-11-17T22:46:06,674 INFO [RS:0;1a6e40b21a48:34025 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C34025%2C1731883565240.1731883566673 2024-11-17T22:46:06,686 INFO [RS:0;1a6e40b21a48:34025 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240/1a6e40b21a48%2C34025%2C1731883565240.1731883566673 2024-11-17T22:46:06,689 DEBUG [RS:0;1a6e40b21a48:34025 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34647:34647),(127.0.0.1/127.0.0.1:38333:38333)] 2024-11-17T22:46:06,788 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:46:06,790 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T22:46:06,793 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T22:46:06,794 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:46:06,795 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:46:06,795 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T22:46:06,799 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T22:46:06,799 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:46:06,800 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:46:06,800 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T22:46:06,803 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T22:46:06,804 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:46:06,805 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:46:06,806 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T22:46:06,809 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T22:46:06,810 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:46:06,812 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:46:06,812 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T22:46:06,815 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/hbase/meta/1588230740 2024-11-17T22:46:06,816 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/hbase/meta/1588230740 2024-11-17T22:46:06,819 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T22:46:06,819 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T22:46:06,821 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-17T22:46:06,827 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T22:46:06,832 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T22:46:06,833 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=721629, jitterRate=-0.08240202069282532}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T22:46:06,836 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731883566788Initializing all the Stores at 1731883566790 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883566790Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883566790Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883566790Instantiating store for column family {NAME 
=> 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883566790Cleaning up temporary data from old regions at 1731883566819 (+29 ms)Region opened successfully at 1731883566836 (+17 ms) 2024-11-17T22:46:06,837 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T22:46:06,837 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T22:46:06,837 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T22:46:06,837 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T22:46:06,837 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T22:46:06,839 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T22:46:06,840 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731883566837Disabling compacts and flushes for region at 1731883566837Disabling writes for close at 1731883566837Writing region close event to WAL at 1731883566839 (+2 ms)Closed at 1731883566839 2024-11-17T22:46:06,843 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T22:46:06,843 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-17T22:46:06,852 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-17T22:46:06,863 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T22:46:06,866 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-17T22:46:07,019 DEBUG [1a6e40b21a48:42095 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-17T22:46:07,032 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1a6e40b21a48,34025,1731883565240 2024-11-17T22:46:07,038 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1a6e40b21a48,34025,1731883565240, state=OPENING 2024-11-17T22:46:07,042 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-17T22:46:07,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34025-0x1004fdd569d0001, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:46:07,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42095-0x1004fdd569d0000, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:46:07,045 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T22:46:07,045 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T22:46:07,047 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T22:46:07,048 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=1a6e40b21a48,34025,1731883565240}] 2024-11-17T22:46:07,223 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-17T22:46:07,226 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34997, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-17T22:46:07,238 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-17T22:46:07,239 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T22:46:07,243 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a6e40b21a48%2C34025%2C1731883565240.meta, suffix=.meta, logDir=hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240, archiveDir=hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/oldWALs, maxLogs=32 2024-11-17T22:46:07,246 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C34025%2C1731883565240.meta.1731883567246.meta 2024-11-17T22:46:07,266 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240/1a6e40b21a48%2C34025%2C1731883565240.meta.1731883567246.meta 2024-11-17T22:46:07,268 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38333:38333),(127.0.0.1/127.0.0.1:34647:34647)] 2024-11-17T22:46:07,269 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-17T22:46:07,271 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-17T22:46:07,274 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-17T22:46:07,278 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-17T22:46:07,282 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-17T22:46:07,283 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:46:07,283 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-17T22:46:07,283 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-17T22:46:07,286 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T22:46:07,288 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T22:46:07,288 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:46:07,290 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:46:07,290 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T22:46:07,292 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T22:46:07,292 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:46:07,294 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:46:07,294 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T22:46:07,296 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T22:46:07,296 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:46:07,297 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:46:07,297 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T22:46:07,299 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for 
minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T22:46:07,299 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:46:07,300 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:46:07,300 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T22:46:07,302 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/hbase/meta/1588230740 2024-11-17T22:46:07,305 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/hbase/meta/1588230740 2024-11-17T22:46:07,307 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T22:46:07,307 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T22:46:07,308 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
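The FlushLargeStoresPolicy line just above reports that, with no hbase.hregion.percolumnfamilyflush.size.lower.bound in the hbase:meta descriptor, the per-family lower bound falls back to the region memstore flush size divided by the number of column families. A small illustration of that arithmetic using only values visible in this log (not the actual HBase source):

    // Fallback reported in the FlushLargeStoresPolicy line above; illustrative arithmetic only.
    long regionMemStoreFlushSize = 64L * 1024 * 1024; // implied by the log: 16.0 M per family x 4 families
    int metaFamilies = 4;                             // info, ns, rep_barrier, table
    long perFamilyLowerBound = regionMemStoreFlushSize / metaFamilies;
    // perFamilyLowerBound == 16777216 (16 MB), matching flushSizeLowerBound=16777216 logged elsewhere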
2024-11-17T22:46:07,311 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T22:46:07,313 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=698121, jitterRate=-0.11229430139064789}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T22:46:07,313 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-17T22:46:07,315 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731883567284Writing region info on filesystem at 1731883567284Initializing all the Stores at 1731883567286 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883567286Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883567286Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883567286Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883567286Cleaning up temporary data from old regions at 1731883567307 (+21 ms)Running coprocessor post-open hooks at 1731883567313 (+6 ms)Region opened successfully at 1731883567315 (+2 ms) 2024-11-17T22:46:07,324 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731883567215 2024-11-17T22:46:07,339 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-17T22:46:07,340 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-17T22:46:07,343 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=1a6e40b21a48,34025,1731883565240 2024-11-17T22:46:07,346 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1a6e40b21a48,34025,1731883565240, state=OPEN 2024-11-17T22:46:07,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34025-0x1004fdd569d0001, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T22:46:07,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42095-0x1004fdd569d0000, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T22:46:07,351 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T22:46:07,351 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T22:46:07,351 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=1a6e40b21a48,34025,1731883565240 2024-11-17T22:46:07,358 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-17T22:46:07,358 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=1a6e40b21a48,34025,1731883565240 in 303 msec 2024-11-17T22:46:07,367 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-17T22:46:07,367 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 509 msec 2024-11-17T22:46:07,370 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T22:46:07,370 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-17T22:46:07,391 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T22:46:07,393 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a6e40b21a48,34025,1731883565240, seqNum=-1] 2024-11-17T22:46:07,422 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T22:46:07,425 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54513, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T22:46:07,448 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.2300 sec 2024-11-17T22:46:07,448 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731883567448, completionTime=-1 2024-11-17T22:46:07,450 INFO 
[master/1a6e40b21a48:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-17T22:46:07,450 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-17T22:46:07,479 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-17T22:46:07,479 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731883627479 2024-11-17T22:46:07,479 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731883687479 2024-11-17T22:46:07,479 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 28 msec 2024-11-17T22:46:07,483 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,42095,1731883564467-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T22:46:07,483 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,42095,1731883564467-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:46:07,483 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,42095,1731883564467-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:46:07,485 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-1a6e40b21a48:42095, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:46:07,486 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-17T22:46:07,486 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-17T22:46:07,492 DEBUG [master/1a6e40b21a48:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-17T22:46:07,516 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.106sec 2024-11-17T22:46:07,517 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-17T22:46:07,518 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-17T22:46:07,518 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-17T22:46:07,519 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-17T22:46:07,519 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-17T22:46:07,520 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,42095,1731883564467-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T22:46:07,520 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,42095,1731883564467-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-17T22:46:07,529 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-17T22:46:07,530 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-17T22:46:07,531 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,42095,1731883564467-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:46:07,567 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3653b52f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T22:46:07,570 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-17T22:46:07,570 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-17T22:46:07,574 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 1a6e40b21a48,42095,-1 for getting cluster id 2024-11-17T22:46:07,578 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-17T22:46:07,592 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd76533c3-3d0d-4ae2-86bd-a0a58a41ae09' 2024-11-17T22:46:07,599 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-17T22:46:07,599 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d76533c3-3d0d-4ae2-86bd-a0a58a41ae09" 2024-11-17T22:46:07,602 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e936d11, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T22:46:07,603 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a6e40b21a48,42095,-1] 2024-11-17T22:46:07,606 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-17T22:46:07,608 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:46:07,612 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58062, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
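For readability, the ScheduledChore periods enabled in the ChoreService lines above work out as follows (plain unit conversion of the logged values, nothing inferred beyond that):

    // ClusterStatusChore: 60000 ms = 1 min
    // BalancerChore / RegionNormalizerChore / CatalogJanitor / OldWALsDirSizeChore: 300000 ms = 5 min
    // HbckChore: 3600000 ms = 1 h; FlushedSequenceIdFlusher: 10800000 ms = 3 h
    // MobFileCleanerChore: 86400 s = 1 day; MobFileCompactionChore: 604800 s = 7 days; RollingUpgradeChore: 10 s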
2024-11-17T22:46:07,615 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@458c2fcc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T22:46:07,616 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T22:46:07,626 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a6e40b21a48,34025,1731883565240, seqNum=-1] 2024-11-17T22:46:07,626 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T22:46:07,629 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36160, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T22:46:07,672 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=1a6e40b21a48,42095,1731883564467 2024-11-17T22:46:07,672 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:46:07,680 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-17T22:46:07,685 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-17T22:46:07,699 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 1a6e40b21a48,42095,1731883564467 2024-11-17T22:46:07,701 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4b0d4644 2024-11-17T22:46:07,702 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-17T22:46:07,705 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58078, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-17T22:46:07,709 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42095 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-17T22:46:07,709 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42095 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
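The two TableDescriptorChecker warnings above indicate that the region max file size (786432) and memstore flush size (8192) have been shrunk far below their defaults, presumably by the test so that flushes and WAL rolls happen quickly. The log does not show how they were set; one common way in a test is on the client/cluster Configuration, sketched here with the exact property names and values quoted from the warnings:

    // Assumed test-side setup, not shown in this log (org.apache.hadoop.conf.Configuration,
    // org.apache.hadoop.hbase.HBaseConfiguration). Property names and values are taken from the warnings above.
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.max.filesize", 786432L);
    conf.setLong("hbase.hregion.memstore.flush.size", 8192L);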
2024-11-17T22:46:07,716 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42095 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T22:46:07,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42095 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-17T22:46:07,731 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-17T22:46:07,733 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42095 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-17T22:46:07,733 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:46:07,737 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-17T22:46:07,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42095 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-17T22:46:07,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741835_1011 (size=389) 2024-11-17T22:46:07,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741835_1011 (size=389) 2024-11-17T22:46:07,792 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 1430365f56faae0f30cec42339d5f233, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c 2024-11-17T22:46:07,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741836_1012 (size=72) 2024-11-17T22:46:07,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741836_1012 (size=72) 2024-11-17T22:46:07,805 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:46:07,806 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 1430365f56faae0f30cec42339d5f233, disabling compactions & flushes 2024-11-17T22:46:07,806 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233. 2024-11-17T22:46:07,806 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233. 2024-11-17T22:46:07,806 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233. after waiting 0 ms 2024-11-17T22:46:07,806 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233. 2024-11-17T22:46:07,806 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233. 2024-11-17T22:46:07,806 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 1430365f56faae0f30cec42339d5f233: Waiting for close lock at 1731883567806Disabling compacts and flushes for region at 1731883567806Disabling writes for close at 1731883567806Writing region close event to WAL at 1731883567806Closed at 1731883567806 2024-11-17T22:46:07,808 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-17T22:46:07,815 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731883567809"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731883567809"}]},"ts":"1731883567809"} 2024-11-17T22:46:07,820 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
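The create request logged at 22:46:07,716 and the CreateTableProcedure states that follow (pre-operation, write FS layout, add to meta, then region assignment below) are the server side of a single client call. A minimal, self-contained sketch of such a call, with the table and family names taken from the log; this is standard HBase client API, not the test's own code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTestTable {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Table and family names taken from the create request logged above.
                admin.createTable(TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                    .build());
            }
        }
    }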
2024-11-17T22:46:07,822 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-17T22:46:07,826 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731883567823"}]},"ts":"1731883567823"} 2024-11-17T22:46:07,832 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-17T22:46:07,834 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=1430365f56faae0f30cec42339d5f233, ASSIGN}] 2024-11-17T22:46:07,837 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=1430365f56faae0f30cec42339d5f233, ASSIGN 2024-11-17T22:46:07,839 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=1430365f56faae0f30cec42339d5f233, ASSIGN; state=OFFLINE, location=1a6e40b21a48,34025,1731883565240; forceNewPlan=false, retain=false 2024-11-17T22:46:07,991 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1430365f56faae0f30cec42339d5f233, regionState=OPENING, regionLocation=1a6e40b21a48,34025,1731883565240 2024-11-17T22:46:07,997 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=1430365f56faae0f30cec42339d5f233, ASSIGN because future has completed 2024-11-17T22:46:07,999 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1430365f56faae0f30cec42339d5f233, server=1a6e40b21a48,34025,1731883565240}] 2024-11-17T22:46:08,164 INFO [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233. 
2024-11-17T22:46:08,165 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 1430365f56faae0f30cec42339d5f233, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233.', STARTKEY => '', ENDKEY => ''} 2024-11-17T22:46:08,165 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 1430365f56faae0f30cec42339d5f233 2024-11-17T22:46:08,166 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:46:08,166 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 1430365f56faae0f30cec42339d5f233 2024-11-17T22:46:08,166 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 1430365f56faae0f30cec42339d5f233 2024-11-17T22:46:08,169 INFO [StoreOpener-1430365f56faae0f30cec42339d5f233-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1430365f56faae0f30cec42339d5f233 2024-11-17T22:46:08,172 INFO [StoreOpener-1430365f56faae0f30cec42339d5f233-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1430365f56faae0f30cec42339d5f233 columnFamilyName info 2024-11-17T22:46:08,172 DEBUG [StoreOpener-1430365f56faae0f30cec42339d5f233-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:46:08,173 INFO [StoreOpener-1430365f56faae0f30cec42339d5f233-1 {}] regionserver.HStore(327): Store=1430365f56faae0f30cec42339d5f233/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T22:46:08,173 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 1430365f56faae0f30cec42339d5f233 2024-11-17T22:46:08,175 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233 2024-11-17T22:46:08,176 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233 2024-11-17T22:46:08,177 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 1430365f56faae0f30cec42339d5f233 2024-11-17T22:46:08,177 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 1430365f56faae0f30cec42339d5f233 2024-11-17T22:46:08,182 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 1430365f56faae0f30cec42339d5f233 2024-11-17T22:46:08,186 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T22:46:08,187 INFO [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 1430365f56faae0f30cec42339d5f233; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=718014, jitterRate=-0.08699846267700195}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-17T22:46:08,188 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1430365f56faae0f30cec42339d5f233 2024-11-17T22:46:08,189 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 1430365f56faae0f30cec42339d5f233: Running coprocessor pre-open hook at 1731883568166Writing region info on filesystem at 1731883568166Initializing all the Stores at 1731883568168 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883568168Cleaning up temporary data from old regions at 1731883568177 (+9 ms)Running coprocessor post-open hooks at 1731883568188 (+11 ms)Region opened successfully at 1731883568189 (+1 ms) 2024-11-17T22:46:08,191 INFO [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233., pid=6, masterSystemTime=1731883568156 2024-11-17T22:46:08,197 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233. 2024-11-17T22:46:08,197 INFO [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233. 2024-11-17T22:46:08,199 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1430365f56faae0f30cec42339d5f233, regionState=OPEN, openSeqNum=2, regionLocation=1a6e40b21a48,34025,1731883565240 2024-11-17T22:46:08,203 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1430365f56faae0f30cec42339d5f233, server=1a6e40b21a48,34025,1731883565240 because future has completed 2024-11-17T22:46:08,209 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-17T22:46:08,211 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 1430365f56faae0f30cec42339d5f233, server=1a6e40b21a48,34025,1731883565240 in 206 msec 2024-11-17T22:46:08,215 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-17T22:46:08,215 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=1430365f56faae0f30cec42339d5f233, ASSIGN in 376 msec 2024-11-17T22:46:08,217 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-17T22:46:08,218 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731883568217"}]},"ts":"1731883568217"} 2024-11-17T22:46:08,222 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-17T22:46:08,224 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-17T22:46:08,228 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 504 msec 2024-11-17T22:46:12,787 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-17T22:46:12,847 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-17T22:46:12,849 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-17T22:46:14,903 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-17T22:46:14,903 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-17T22:46:14,905 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-17T22:46:14,905 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-17T22:46:14,906 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T22:46:14,906 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-17T22:46:14,906 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-17T22:46:14,907 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-17T22:46:17,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42095 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-17T22:46:17,814 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-17T22:46:17,820 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-17T22:46:17,828 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-17T22:46:17,828 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233. 
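The "Found 1 regions" and firstRegionName lines come from the test utility scanning hbase:meta for the new table. The same information is available through the public RegionLocator API; a short sketch, assuming the Connection from the create-table sketch above is still open (org.apache.hadoop.hbase.client.RegionLocator, org.apache.hadoop.hbase.HRegionLocation):

    // Assumed client-side equivalent of the region lookup logged above.
    try (RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))) {
        for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getRegionNameAsString() + " on " + loc.getServerName());
        }
    }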
2024-11-17T22:46:17,829 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C34025%2C1731883565240.1731883577829 2024-11-17T22:46:17,839 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:46:17,839 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:46:17,840 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:46:17,840 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:46:17,840 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:46:17,840 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240/1a6e40b21a48%2C34025%2C1731883565240.1731883566673 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240/1a6e40b21a48%2C34025%2C1731883565240.1731883577829 2024-11-17T22:46:17,842 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34647:34647),(127.0.0.1/127.0.0.1:38333:38333)] 2024-11-17T22:46:17,842 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240/1a6e40b21a48%2C34025%2C1731883565240.1731883566673 is not closed yet, will try archiving it next time 2024-11-17T22:46:17,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741833_1009 (size=451) 2024-11-17T22:46:17,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741833_1009 (size=451) 2024-11-17T22:46:17,846 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240/1a6e40b21a48%2C34025%2C1731883565240.1731883566673 to hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/oldWALs/1a6e40b21a48%2C34025%2C1731883565240.1731883566673 2024-11-17T22:46:17,851 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233., hostname=1a6e40b21a48,34025,1731883565240, seqNum=2] 2024-11-17T22:46:29,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34025 {}] regionserver.HRegion(8855): Flush requested on 1430365f56faae0f30cec42339d5f233 2024-11-17T22:46:29,920 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1430365f56faae0f30cec42339d5f233 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-17T22:46:29,973 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/.tmp/info/df987a947faf428c80e9f3f6edf77d26 is 1080, key is row0001/info:/1731883577853/Put/seqid=0 2024-11-17T22:46:29,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741838_1014 (size=12509) 2024-11-17T22:46:29,984 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741838_1014 (size=12509) 2024-11-17T22:46:29,985 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/.tmp/info/df987a947faf428c80e9f3f6edf77d26 2024-11-17T22:46:30,027 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/.tmp/info/df987a947faf428c80e9f3f6edf77d26 as hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/info/df987a947faf428c80e9f3f6edf77d26 2024-11-17T22:46:30,037 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/info/df987a947faf428c80e9f3f6edf77d26, entries=7, sequenceid=11, filesize=12.2 K 2024-11-17T22:46:30,044 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 1430365f56faae0f30cec42339d5f233 in 127ms, sequenceid=11, compaction requested=false 2024-11-17T22:46:30,045 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1430365f56faae0f30cec42339d5f233: 2024-11-17T22:46:33,503 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
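The flush above persisted seven cells (row0001 onward, dataSize ~7.36 KB) from the test's writes; the flush itself fires automatically because of the tiny memstore flush size set earlier. A hedged sketch of writes that would produce a comparable flush, reusing the Connection/Admin from the create-table sketch (org.apache.hadoop.hbase.client.Table and Put, org.apache.hadoop.hbase.util.Bytes); the column qualifier and exact value size are assumptions, since the log only shows row keys and cell lengths:

    // Assumed writer loop producing cells like the ones flushed above (rows row0001..., ~1 KB values).
    byte[] family = Bytes.toBytes("info");
    byte[] qualifier = Bytes.toBytes("q");   // assumption: the qualifier is not visible in the log
    byte[] value = new byte[1024];           // assumption: ~1 KB payload, consistent with the 1080-byte cells
    try (Table table = conn.getTable(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))) {
        for (int i = 1; i <= 7; i++) {
            table.put(new Put(Bytes.toBytes(String.format("row%04d", i)))
                    .addColumn(family, qualifier, value));
        }
    }
    // Explicit client-side equivalent of the automatic flush logged above:
    admin.flush(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"));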
2024-11-17T22:46:37,928 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C34025%2C1731883565240.1731883597927 2024-11-17T22:46:38,141 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 209 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43717,DS-b3d6d1ad-43ba-4cfd-bfa1-922e31f221ac,DISK], DatanodeInfoWithStorage[127.0.0.1:37031,DS-5fe2b393-6f38-48da-812f-2a44bebea2ce,DISK]] 2024-11-17T22:46:38,142 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:46:38,142 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:46:38,142 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:46:38,143 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:46:38,143 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:46:38,143 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240/1a6e40b21a48%2C34025%2C1731883565240.1731883577829 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240/1a6e40b21a48%2C34025%2C1731883565240.1731883597927 2024-11-17T22:46:38,144 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38333:38333),(127.0.0.1/127.0.0.1:34647:34647)] 2024-11-17T22:46:38,144 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240/1a6e40b21a48%2C34025%2C1731883565240.1731883577829 is not closed yet, will try archiving it next time 2024-11-17T22:46:38,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741837_1013 (size=12399) 2024-11-17T22:46:38,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741837_1013 (size=12399) 2024-11-17T22:46:38,349 INFO [FSHLog-0-hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c-prefix:1a6e40b21a48,34025,1731883565240 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37031,DS-5fe2b393-6f38-48da-812f-2a44bebea2ce,DISK], DatanodeInfoWithStorage[127.0.0.1:43717,DS-b3d6d1ad-43ba-4cfd-bfa1-922e31f221ac,DISK]] 2024-11-17T22:46:40,555 INFO [FSHLog-0-hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c-prefix:1a6e40b21a48,34025,1731883565240 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37031,DS-5fe2b393-6f38-48da-812f-2a44bebea2ce,DISK], DatanodeInfoWithStorage[127.0.0.1:43717,DS-b3d6d1ad-43ba-4cfd-bfa1-922e31f221ac,DISK]] 2024-11-17T22:46:42,759 INFO [FSHLog-0-hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c-prefix:1a6e40b21a48,34025,1731883565240 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37031,DS-5fe2b393-6f38-48da-812f-2a44bebea2ce,DISK], DatanodeInfoWithStorage[127.0.0.1:43717,DS-b3d6d1ad-43ba-4cfd-bfa1-922e31f221ac,DISK]] 2024-11-17T22:46:44,965 INFO [FSHLog-0-hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c-prefix:1a6e40b21a48,34025,1731883565240 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37031,DS-5fe2b393-6f38-48da-812f-2a44bebea2ce,DISK], DatanodeInfoWithStorage[127.0.0.1:43717,DS-b3d6d1ad-43ba-4cfd-bfa1-922e31f221ac,DISK]] 2024-11-17T22:46:44,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34025 {}] regionserver.HRegion(8855): Flush requested on 1430365f56faae0f30cec42339d5f233 2024-11-17T22:46:44,965 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1430365f56faae0f30cec42339d5f233 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-17T22:46:45,168 INFO [FSHLog-0-hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c-prefix:1a6e40b21a48,34025,1731883565240 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37031,DS-5fe2b393-6f38-48da-812f-2a44bebea2ce,DISK], DatanodeInfoWithStorage[127.0.0.1:43717,DS-b3d6d1ad-43ba-4cfd-bfa1-922e31f221ac,DISK]] 2024-11-17T22:46:45,175 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/.tmp/info/d480e2a9a244493b85701c132584c9a2 is 1080, key is row0008/info:/1731883591916/Put/seqid=0 2024-11-17T22:46:45,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741840_1016 (size=12509) 2024-11-17T22:46:45,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741840_1016 (size=12509) 2024-11-17T22:46:45,185 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/.tmp/info/d480e2a9a244493b85701c132584c9a2 2024-11-17T22:46:45,196 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/.tmp/info/d480e2a9a244493b85701c132584c9a2 as hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/info/d480e2a9a244493b85701c132584c9a2 2024-11-17T22:46:45,206 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/info/d480e2a9a244493b85701c132584c9a2, entries=7, sequenceid=21, filesize=12.2 K 2024-11-17T22:46:45,408 INFO [FSHLog-0-hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c-prefix:1a6e40b21a48,34025,1731883565240 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37031,DS-5fe2b393-6f38-48da-812f-2a44bebea2ce,DISK], DatanodeInfoWithStorage[127.0.0.1:43717,DS-b3d6d1ad-43ba-4cfd-bfa1-922e31f221ac,DISK]] 2024-11-17T22:46:45,409 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 1430365f56faae0f30cec42339d5f233 in 
443ms, sequenceid=21, compaction requested=false 2024-11-17T22:46:45,409 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1430365f56faae0f30cec42339d5f233: 2024-11-17T22:46:45,409 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-17T22:46:45,409 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T22:46:45,410 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/info/df987a947faf428c80e9f3f6edf77d26 because midkey is the same as first or last row 2024-11-17T22:46:47,172 INFO [FSHLog-0-hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c-prefix:1a6e40b21a48,34025,1731883565240 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37031,DS-5fe2b393-6f38-48da-812f-2a44bebea2ce,DISK], DatanodeInfoWithStorage[127.0.0.1:43717,DS-b3d6d1ad-43ba-4cfd-bfa1-922e31f221ac,DISK]] 2024-11-17T22:46:47,541 INFO [master/1a6e40b21a48:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-17T22:46:47,541 INFO [master/1a6e40b21a48:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-17T22:46:49,380 INFO [FSHLog-0-hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c-prefix:1a6e40b21a48,34025,1731883565240 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37031,DS-5fe2b393-6f38-48da-812f-2a44bebea2ce,DISK], DatanodeInfoWithStorage[127.0.0.1:43717,DS-b3d6d1ad-43ba-4cfd-bfa1-922e31f221ac,DISK]] 2024-11-17T22:46:49,385 WARN [FSHLog-0-hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c-prefix:1a6e40b21a48,34025,1731883565240 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37031,DS-5fe2b393-6f38-48da-812f-2a44bebea2ce,DISK], DatanodeInfoWithStorage[127.0.0.1:43717,DS-b3d6d1ad-43ba-4cfd-bfa1-922e31f221ac,DISK]] 2024-11-17T22:46:49,386 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1a6e40b21a48%2C34025%2C1731883565240:(num 1731883597927) roll requested 2024-11-17T22:46:49,387 INFO [regionserver/1a6e40b21a48:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C34025%2C1731883565240.1731883609387 2024-11-17T22:46:49,598 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 208 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37031,DS-5fe2b393-6f38-48da-812f-2a44bebea2ce,DISK], DatanodeInfoWithStorage[127.0.0.1:43717,DS-b3d6d1ad-43ba-4cfd-bfa1-922e31f221ac,DISK]] 2024-11-17T22:46:49,599 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:46:49,599 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:46:49,599 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:46:49,599 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:46:49,599 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
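testSlowSyncLogRolling exercises two roll triggers that show up in these entries: a roll requested after too many individually slow syncs (the WARN above reports count=8, threshold=5), and, further down in the log, a roll requested when a single sync exceeds a time threshold (time=5011 ms, threshold=5000 ms). The sketch below models both checks in plain Java; it is not the AbstractFSWAL implementation, and the constants simply mirror the numbers printed in this run except for the 100 ms "slow" cutoff, which is an assumption.

/**
 * Minimal sketch of the two slow-sync roll triggers seen in this log.
 * Not the HBase AbstractFSWAL implementation; constants mirror the values
 * printed by this test run, except SLOW_SYNC_MS which is assumed.
 */
public class SlowSyncRollSketch {
    static final long SLOW_SYNC_MS = 100;       // assumed cutoff for counting a sync as "slow"
    static final long ROLL_ON_SYNC_MS = 5000;   // "threshold=5000 ms" in the later WARN lines
    static final int SLOW_SYNC_ROLL_COUNT = 5;  // "threshold=5" in the WARN line above

    private int slowSyncCount;

    /** Returns true if this sync should cause a log roll to be requested. */
    boolean recordSync(long syncCostMs) {
        if (syncCostMs >= ROLL_ON_SYNC_MS) {
            return true;                        // one very slow sync is enough on its own
        }
        if (syncCostMs >= SLOW_SYNC_MS) {
            slowSyncCount++;                    // otherwise accumulate merely-slow syncs
        }
        return slowSyncCount > SLOW_SYNC_ROLL_COUNT;
    }

    public static void main(String[] args) {
        SlowSyncRollSketch wal = new SlowSyncRollSketch();
        long[] costs = {209, 201, 201, 202, 201, 201, 201, 202}; // sync costs reported above
        boolean rollRequested = false;
        for (long cost : costs) {
            rollRequested |= wal.recordSync(cost);
        }
        System.out.println("roll requested=" + rollRequested
            + ", slow sync count=" + wal.slowSyncCount);
        // A single 5011 ms sync trips the time-based trigger by itself:
        System.out.println("roll on 5011 ms sync=" + new SlowSyncRollSketch().recordSync(5011));
    }
}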
2024-11-17T22:46:49,600 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240/1a6e40b21a48%2C34025%2C1731883565240.1731883597927 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240/1a6e40b21a48%2C34025%2C1731883565240.1731883609387 2024-11-17T22:46:49,602 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38333:38333),(127.0.0.1/127.0.0.1:34647:34647)] 2024-11-17T22:46:49,603 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240/1a6e40b21a48%2C34025%2C1731883565240.1731883597927 is not closed yet, will try archiving it next time 2024-11-17T22:46:49,603 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240/1a6e40b21a48%2C34025%2C1731883565240.1731883577829 to hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/oldWALs/1a6e40b21a48%2C34025%2C1731883565240.1731883577829 2024-11-17T22:46:49,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741839_1015 (size=7739) 2024-11-17T22:46:49,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741839_1015 (size=7739) 2024-11-17T22:46:51,589 INFO [FSHLog-0-hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c-prefix:1a6e40b21a48,34025,1731883565240 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37031,DS-5fe2b393-6f38-48da-812f-2a44bebea2ce,DISK], DatanodeInfoWithStorage[127.0.0.1:43717,DS-b3d6d1ad-43ba-4cfd-bfa1-922e31f221ac,DISK]] 2024-11-17T22:46:53,166 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1430365f56faae0f30cec42339d5f233, had cached 0 bytes from a total of 25018 2024-11-17T22:46:53,799 INFO [FSHLog-0-hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c-prefix:1a6e40b21a48,34025,1731883565240 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37031,DS-5fe2b393-6f38-48da-812f-2a44bebea2ce,DISK], DatanodeInfoWithStorage[127.0.0.1:43717,DS-b3d6d1ad-43ba-4cfd-bfa1-922e31f221ac,DISK]] 2024-11-17T22:46:56,007 INFO [FSHLog-0-hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c-prefix:1a6e40b21a48,34025,1731883565240 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37031,DS-5fe2b393-6f38-48da-812f-2a44bebea2ce,DISK], DatanodeInfoWithStorage[127.0.0.1:43717,DS-b3d6d1ad-43ba-4cfd-bfa1-922e31f221ac,DISK]] 2024-11-17T22:46:58,216 INFO [FSHLog-0-hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c-prefix:1a6e40b21a48,34025,1731883565240 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37031,DS-5fe2b393-6f38-48da-812f-2a44bebea2ce,DISK], 
DatanodeInfoWithStorage[127.0.0.1:43717,DS-b3d6d1ad-43ba-4cfd-bfa1-922e31f221ac,DISK]] 2024-11-17T22:47:00,221 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-17T22:47:00,222 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C34025%2C1731883565240.1731883620221 2024-11-17T22:47:03,503 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-17T22:47:05,238 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5011 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37031,DS-5fe2b393-6f38-48da-812f-2a44bebea2ce,DISK], DatanodeInfoWithStorage[127.0.0.1:43717,DS-b3d6d1ad-43ba-4cfd-bfa1-922e31f221ac,DISK]] 2024-11-17T22:47:05,241 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5011 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37031,DS-5fe2b393-6f38-48da-812f-2a44bebea2ce,DISK], DatanodeInfoWithStorage[127.0.0.1:43717,DS-b3d6d1ad-43ba-4cfd-bfa1-922e31f221ac,DISK]] 2024-11-17T22:47:05,241 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1a6e40b21a48%2C34025%2C1731883565240:(num 1731883620221) roll requested 2024-11-17T22:47:05,241 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:05,241 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:05,241 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:05,242 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:05,242 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:05,242 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240/1a6e40b21a48%2C34025%2C1731883565240.1731883609387 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240/1a6e40b21a48%2C34025%2C1731883565240.1731883620221 2024-11-17T22:47:05,243 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38333:38333),(127.0.0.1/127.0.0.1:34647:34647)] 2024-11-17T22:47:05,244 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240/1a6e40b21a48%2C34025%2C1731883565240.1731883609387 is not closed yet, will try archiving it next time 2024-11-17T22:47:05,244 INFO [regionserver/1a6e40b21a48:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C34025%2C1731883565240.1731883625244 2024-11-17T22:47:05,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741841_1017 (size=4753) 2024-11-17T22:47:05,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741841_1017 (size=4753) 2024-11-17T22:47:10,248 INFO [FSHLog-0-hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c-prefix:1a6e40b21a48,34025,1731883565240 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:37031,DS-5fe2b393-6f38-48da-812f-2a44bebea2ce,DISK], DatanodeInfoWithStorage[127.0.0.1:43717,DS-b3d6d1ad-43ba-4cfd-bfa1-922e31f221ac,DISK]] 2024-11-17T22:47:10,248 WARN [FSHLog-0-hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c-prefix:1a6e40b21a48,34025,1731883565240 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37031,DS-5fe2b393-6f38-48da-812f-2a44bebea2ce,DISK], DatanodeInfoWithStorage[127.0.0.1:43717,DS-b3d6d1ad-43ba-4cfd-bfa1-922e31f221ac,DISK]] 2024-11-17T22:47:10,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34025 {}] regionserver.HRegion(8855): Flush requested on 1430365f56faae0f30cec42339d5f233 2024-11-17T22:47:10,249 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1430365f56faae0f30cec42339d5f233 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-17T22:47:10,257 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5008 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37031,DS-5fe2b393-6f38-48da-812f-2a44bebea2ce,DISK], DatanodeInfoWithStorage[127.0.0.1:43717,DS-b3d6d1ad-43ba-4cfd-bfa1-922e31f221ac,DISK]] 2024-11-17T22:47:10,257 WARN [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5008 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37031,DS-5fe2b393-6f38-48da-812f-2a44bebea2ce,DISK], DatanodeInfoWithStorage[127.0.0.1:43717,DS-b3d6d1ad-43ba-4cfd-bfa1-922e31f221ac,DISK]] 2024-11-17T22:47:12,250 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-17T22:47:15,253 INFO [FSHLog-0-hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c-prefix:1a6e40b21a48,34025,1731883565240 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5002 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37031,DS-5fe2b393-6f38-48da-812f-2a44bebea2ce,DISK], DatanodeInfoWithStorage[127.0.0.1:43717,DS-b3d6d1ad-43ba-4cfd-bfa1-922e31f221ac,DISK]] 2024-11-17T22:47:15,253 WARN [FSHLog-0-hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c-prefix:1a6e40b21a48,34025,1731883565240 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37031,DS-5fe2b393-6f38-48da-812f-2a44bebea2ce,DISK], DatanodeInfoWithStorage[127.0.0.1:43717,DS-b3d6d1ad-43ba-4cfd-bfa1-922e31f221ac,DISK]] 2024-11-17T22:47:15,254 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:15,254 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:15,255 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:15,255 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:15,256 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:15,257 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240/1a6e40b21a48%2C34025%2C1731883565240.1731883620221 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240/1a6e40b21a48%2C34025%2C1731883565240.1731883625244 2024-11-17T22:47:15,259 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34647:34647),(127.0.0.1/127.0.0.1:38333:38333)] 2024-11-17T22:47:15,260 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240/1a6e40b21a48%2C34025%2C1731883565240.1731883620221 is not closed yet, will try archiving it next time 2024-11-17T22:47:15,260 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1a6e40b21a48%2C34025%2C1731883565240:(num 1731883625244) roll requested 2024-11-17T22:47:15,261 INFO [regionserver/1a6e40b21a48:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C34025%2C1731883565240.1731883635260 2024-11-17T22:47:15,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741842_1018 (size=1569) 2024-11-17T22:47:15,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741842_1018 (size=1569) 2024-11-17T22:47:15,264 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/.tmp/info/f60a8c4a749842d1a1765fe407cf3c85 is 1080, key is row0015/info:/1731883606969/Put/seqid=0 2024-11-17T22:47:15,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741844_1020 (size=12509) 2024-11-17T22:47:15,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741844_1020 (size=12509) 2024-11-17T22:47:15,272 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/.tmp/info/f60a8c4a749842d1a1765fe407cf3c85 2024-11-17T22:47:15,283 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/.tmp/info/f60a8c4a749842d1a1765fe407cf3c85 as hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/info/f60a8c4a749842d1a1765fe407cf3c85 2024-11-17T22:47:15,292 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/info/f60a8c4a749842d1a1765fe407cf3c85, entries=7, sequenceid=31, filesize=12.2 K 2024-11-17T22:47:20,276 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5012 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:43717,DS-b3d6d1ad-43ba-4cfd-bfa1-922e31f221ac,DISK], DatanodeInfoWithStorage[127.0.0.1:37031,DS-5fe2b393-6f38-48da-812f-2a44bebea2ce,DISK]] 2024-11-17T22:47:20,276 WARN [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5012 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43717,DS-b3d6d1ad-43ba-4cfd-bfa1-922e31f221ac,DISK], DatanodeInfoWithStorage[127.0.0.1:37031,DS-5fe2b393-6f38-48da-812f-2a44bebea2ce,DISK]] 2024-11-17T22:47:20,295 INFO [FSHLog-0-hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c-prefix:1a6e40b21a48,34025,1731883565240 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43717,DS-b3d6d1ad-43ba-4cfd-bfa1-922e31f221ac,DISK], DatanodeInfoWithStorage[127.0.0.1:37031,DS-5fe2b393-6f38-48da-812f-2a44bebea2ce,DISK]] 2024-11-17T22:47:20,295 WARN [FSHLog-0-hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c-prefix:1a6e40b21a48,34025,1731883565240 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43717,DS-b3d6d1ad-43ba-4cfd-bfa1-922e31f221ac,DISK], DatanodeInfoWithStorage[127.0.0.1:37031,DS-5fe2b393-6f38-48da-812f-2a44bebea2ce,DISK]] 2024-11-17T22:47:20,295 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 1430365f56faae0f30cec42339d5f233 in 10046ms, sequenceid=31, compaction requested=true 2024-11-17T22:47:20,295 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:20,295 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1430365f56faae0f30cec42339d5f233: 2024-11-17T22:47:20,296 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:20,296 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:20,296 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-17T22:47:20,296 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T22:47:20,296 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:20,296 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/info/df987a947faf428c80e9f3f6edf77d26 because midkey is the same as first or last row 2024-11-17T22:47:20,297 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:20,297 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240/1a6e40b21a48%2C34025%2C1731883565240.1731883625244 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240/1a6e40b21a48%2C34025%2C1731883565240.1731883635260 2024-11-17T22:47:20,299 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:38333:38333),(127.0.0.1/127.0.0.1:34647:34647)] 2024-11-17T22:47:20,300 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240/1a6e40b21a48%2C34025%2C1731883565240.1731883625244 is not closed yet, will try archiving it next time 2024-11-17T22:47:20,300 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240/1a6e40b21a48%2C34025%2C1731883565240.1731883597927 to hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/oldWALs/1a6e40b21a48%2C34025%2C1731883565240.1731883597927 2024-11-17T22:47:20,300 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1a6e40b21a48%2C34025%2C1731883565240:(num 1731883635260) roll requested 2024-11-17T22:47:20,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1430365f56faae0f30cec42339d5f233:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T22:47:20,301 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C34025%2C1731883565240.1731883640300 2024-11-17T22:47:20,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741843_1019 (size=438) 2024-11-17T22:47:20,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741843_1019 (size=438) 2024-11-17T22:47:20,303 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240/1a6e40b21a48%2C34025%2C1731883565240.1731883609387 to hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/oldWALs/1a6e40b21a48%2C34025%2C1731883565240.1731883609387 2024-11-17T22:47:20,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T22:47:20,303 DEBUG [RS:0;1a6e40b21a48:34025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T22:47:20,305 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240/1a6e40b21a48%2C34025%2C1731883565240.1731883620221 to hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/oldWALs/1a6e40b21a48%2C34025%2C1731883565240.1731883620221 2024-11-17T22:47:20,307 DEBUG [RS:0;1a6e40b21a48:34025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T22:47:20,307 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240/1a6e40b21a48%2C34025%2C1731883565240.1731883625244 to 
hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/oldWALs/1a6e40b21a48%2C34025%2C1731883565240.1731883625244 2024-11-17T22:47:20,309 DEBUG [RS:0;1a6e40b21a48:34025-shortCompactions-0 {}] regionserver.HStore(1541): 1430365f56faae0f30cec42339d5f233/info is initiating minor compaction (all files) 2024-11-17T22:47:20,309 INFO [RS:0;1a6e40b21a48:34025-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 1430365f56faae0f30cec42339d5f233/info in TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233. 2024-11-17T22:47:20,309 INFO [RS:0;1a6e40b21a48:34025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/info/df987a947faf428c80e9f3f6edf77d26, hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/info/d480e2a9a244493b85701c132584c9a2, hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/info/f60a8c4a749842d1a1765fe407cf3c85] into tmpdir=hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/.tmp, totalSize=36.6 K 2024-11-17T22:47:20,311 DEBUG [RS:0;1a6e40b21a48:34025-shortCompactions-0 {}] compactions.Compactor(225): Compacting df987a947faf428c80e9f3f6edf77d26, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731883577853 2024-11-17T22:47:20,311 DEBUG [RS:0;1a6e40b21a48:34025-shortCompactions-0 {}] compactions.Compactor(225): Compacting d480e2a9a244493b85701c132584c9a2, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731883591916 2024-11-17T22:47:20,312 DEBUG [RS:0;1a6e40b21a48:34025-shortCompactions-0 {}] compactions.Compactor(225): Compacting f60a8c4a749842d1a1765fe407cf3c85, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731883606969 2024-11-17T22:47:20,313 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:20,313 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:20,313 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:20,313 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:20,313 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:20,313 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240/1a6e40b21a48%2C34025%2C1731883565240.1731883635260 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240/1a6e40b21a48%2C34025%2C1731883565240.1731883640300 2024-11-17T22:47:20,315 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38333:38333),(127.0.0.1/127.0.0.1:34647:34647)] 2024-11-17T22:47:20,315 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): 
hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240/1a6e40b21a48%2C34025%2C1731883565240.1731883635260 is not closed yet, will try archiving it next time 2024-11-17T22:47:20,316 INFO [regionserver/1a6e40b21a48:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C34025%2C1731883565240.1731883640315 2024-11-17T22:47:20,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741845_1021 (size=93) 2024-11-17T22:47:20,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741845_1021 (size=93) 2024-11-17T22:47:20,319 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240/1a6e40b21a48%2C34025%2C1731883565240.1731883635260 to hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/oldWALs/1a6e40b21a48%2C34025%2C1731883565240.1731883635260 2024-11-17T22:47:20,330 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:20,330 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:20,330 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:20,330 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:20,331 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:20,331 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240/1a6e40b21a48%2C34025%2C1731883565240.1731883640300 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240/1a6e40b21a48%2C34025%2C1731883565240.1731883640315 2024-11-17T22:47:20,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741846_1022 (size=1258) 2024-11-17T22:47:20,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741846_1022 (size=1258) 2024-11-17T22:47:20,341 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34647:34647),(127.0.0.1/127.0.0.1:38333:38333)] 2024-11-17T22:47:20,341 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/WALs/1a6e40b21a48,34025,1731883565240/1a6e40b21a48%2C34025%2C1731883565240.1731883640300 is not closed yet, will try archiving it next time 2024-11-17T22:47:20,353 INFO [RS:0;1a6e40b21a48:34025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1430365f56faae0f30cec42339d5f233#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T22:47:20,354 DEBUG [RS:0;1a6e40b21a48:34025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/.tmp/info/e2163d1232864bb3b15327753ca06aad is 1080, key is row0001/info:/1731883577853/Put/seqid=0 2024-11-17T22:47:20,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741848_1024 (size=27710) 2024-11-17T22:47:20,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741848_1024 (size=27710) 2024-11-17T22:47:20,376 DEBUG [RS:0;1a6e40b21a48:34025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/.tmp/info/e2163d1232864bb3b15327753ca06aad as hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/info/e2163d1232864bb3b15327753ca06aad 2024-11-17T22:47:20,392 INFO [RS:0;1a6e40b21a48:34025-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 1430365f56faae0f30cec42339d5f233/info of 1430365f56faae0f30cec42339d5f233 into e2163d1232864bb3b15327753ca06aad(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T22:47:20,393 DEBUG [RS:0;1a6e40b21a48:34025-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 1430365f56faae0f30cec42339d5f233: 2024-11-17T22:47:20,394 INFO [RS:0;1a6e40b21a48:34025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233., storeName=1430365f56faae0f30cec42339d5f233/info, priority=13, startTime=1731883640300; duration=0sec 2024-11-17T22:47:20,395 DEBUG [RS:0;1a6e40b21a48:34025-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-17T22:47:20,395 DEBUG [RS:0;1a6e40b21a48:34025-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T22:47:20,395 DEBUG [RS:0;1a6e40b21a48:34025-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/info/e2163d1232864bb3b15327753ca06aad because midkey is the same as first or last row 2024-11-17T22:47:20,395 DEBUG [RS:0;1a6e40b21a48:34025-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-17T22:47:20,395 DEBUG [RS:0;1a6e40b21a48:34025-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T22:47:20,395 DEBUG [RS:0;1a6e40b21a48:34025-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/info/e2163d1232864bb3b15327753ca06aad because midkey is the same as first or last row 2024-11-17T22:47:20,395 DEBUG [RS:0;1a6e40b21a48:34025-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-17T22:47:20,395 DEBUG [RS:0;1a6e40b21a48:34025-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T22:47:20,395 DEBUG [RS:0;1a6e40b21a48:34025-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/info/e2163d1232864bb3b15327753ca06aad because midkey is the same as first or last row 2024-11-17T22:47:20,395 DEBUG [RS:0;1a6e40b21a48:34025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T22:47:20,396 DEBUG [RS:0;1a6e40b21a48:34025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1430365f56faae0f30cec42339d5f233:info 2024-11-17T22:47:32,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34025 {}] regionserver.HRegion(8855): Flush requested on 1430365f56faae0f30cec42339d5f233 2024-11-17T22:47:32,353 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1430365f56faae0f30cec42339d5f233 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-17T22:47:32,362 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/.tmp/info/25fdb320a88044d6a8779c82641654d6 is 1080, key is row0022/info:/1731883640317/Put/seqid=0 2024-11-17T22:47:32,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741849_1025 (size=12509) 2024-11-17T22:47:32,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741849_1025 (size=12509) 2024-11-17T22:47:32,370 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/.tmp/info/25fdb320a88044d6a8779c82641654d6 2024-11-17T22:47:32,380 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/.tmp/info/25fdb320a88044d6a8779c82641654d6 as hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/info/25fdb320a88044d6a8779c82641654d6 2024-11-17T22:47:32,389 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/info/25fdb320a88044d6a8779c82641654d6, entries=7, sequenceid=42, filesize=12.2 K 2024-11-17T22:47:32,391 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 1430365f56faae0f30cec42339d5f233 in 38ms, sequenceid=42, compaction requested=false 2024-11-17T22:47:32,391 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1430365f56faae0f30cec42339d5f233: 2024-11-17T22:47:32,391 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-17T22:47:32,391 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T22:47:32,391 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/info/e2163d1232864bb3b15327753ca06aad because midkey is the same as first or last row 2024-11-17T22:47:33,504 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-17T22:47:38,166 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1430365f56faae0f30cec42339d5f233, had cached 0 bytes from a total of 40219 2024-11-17T22:47:40,364 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-17T22:47:40,365 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
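The flush above once more triggers the split-policy probe: the store is large enough to split (sumSize=39.3 K against sizeToCheck=16.0 K), but the attempt is abandoned because the candidate midkey equals the first or last row of the store file. A compact illustration of those two checks follows; the sizes come from the log, the row keys are invented, and this is not the ConstantSizeRegionSplitPolicy/StoreUtils code.

import java.util.Arrays;

/**
 * Sketch of the two split checks traced in the log: a size check, then a
 * sanity check that the candidate split point differs from the first and
 * last row keys. Not HBase code; row keys are made up.
 */
public class SplitCheckSketch {
    static boolean shouldSplit(long sumSizeBytes, long sizeToCheckBytes) {
        return sumSizeBytes > sizeToCheckBytes;                 // "region size is big enough"
    }

    static boolean canSplitAt(byte[] midkey, byte[] firstRow, byte[] lastRow) {
        // "cannot split ... because midkey is the same as first or last row"
        return !Arrays.equals(midkey, firstRow) && !Arrays.equals(midkey, lastRow);
    }

    public static void main(String[] args) {
        long sumSize = 40243;       // ~39.3 K as reported above
        long sizeToCheck = 16384;   // the 16.0 K threshold

        byte[] first = "row0001".getBytes();
        byte[] last = "row0029".getBytes();
        byte[] midkey = "row0001".getBytes(); // the degenerate case hit in this run

        System.out.println("size check passes: " + shouldSplit(sumSize, sizeToCheck));
        System.out.println("split point usable: " + canSplitAt(midkey, first, last));
    }
}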
2024-11-17T22:47:40,365 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T22:47:40,373 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:47:40,373 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:47:40,373 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
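The call stack above is the normal end-of-test path: the JUnit @After method in AbstractTestLogRolling invokes HBaseTestingUtil.shutdownMiniCluster(), which closes the shared async connection and then stops the mini cluster. The skeleton below shows that lifecycle wiring as a sketch only; startMiniCluster() and the empty test body are assumptions, not taken from this log.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

/**
 * Skeleton of the test lifecycle implied by the stack trace above. This is a
 * sketch, not the AbstractTestLogRolling source; startMiniCluster() is assumed
 * to be the setup counterpart of the shutdownMiniCluster() frame in the log.
 */
public class LogRollingLifecycleSketch {
    private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

    @Before
    public void setUp() throws Exception {
        testUtil.startMiniCluster();   // assumed setup call
    }

    @Test
    public void testAgainstTheMiniCluster() {
        // WAL-rolling assertions would live here; elided.
    }

    @After
    public void tearDown() throws Exception {
        // Matches the HBaseTestingUtil.shutdownMiniCluster frame in the call stack above.
        testUtil.shutdownMiniCluster();
    }
}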
2024-11-17T22:47:40,373 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-17T22:47:40,373 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=933253213, stopped=false 2024-11-17T22:47:40,374 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=1a6e40b21a48,42095,1731883564467 2024-11-17T22:47:40,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42095-0x1004fdd569d0000, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T22:47:40,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42095-0x1004fdd569d0000, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:47:40,375 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T22:47:40,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34025-0x1004fdd569d0001, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T22:47:40,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34025-0x1004fdd569d0001, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:47:40,375 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-17T22:47:40,375 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T22:47:40,375 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:47:40,376 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '1a6e40b21a48,34025,1731883565240' ***** 2024-11-17T22:47:40,376 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-17T22:47:40,376 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42095-0x1004fdd569d0000, quorum=127.0.0.1:63688, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T22:47:40,376 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34025-0x1004fdd569d0001, quorum=127.0.0.1:63688, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T22:47:40,376 INFO [RS:0;1a6e40b21a48:34025 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-17T22:47:40,376 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-17T22:47:40,377 INFO [RS:0;1a6e40b21a48:34025 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-17T22:47:40,377 INFO [RS:0;1a6e40b21a48:34025 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-17T22:47:40,377 INFO [RS:0;1a6e40b21a48:34025 {}] regionserver.HRegionServer(3091): Received CLOSE for 1430365f56faae0f30cec42339d5f233 2024-11-17T22:47:40,378 INFO [RS:0;1a6e40b21a48:34025 {}] regionserver.HRegionServer(959): stopping server 1a6e40b21a48,34025,1731883565240 2024-11-17T22:47:40,378 INFO [RS:0;1a6e40b21a48:34025 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T22:47:40,378 INFO [RS:0;1a6e40b21a48:34025 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;1a6e40b21a48:34025. 
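Shutdown is propagated through ZooKeeper: deleting the /hbase/running znode produces the NodeDeleted events logged above, and each watcher reacts by stopping its server. The snippet below shows the generic ZooKeeper watch pattern behind those ZKWatcher lines; the quorum address and znode path are copied from the log, but this is plain ZooKeeper client code, not HBase's ZKWatcher, and the session timeout is an arbitrary choice.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

/**
 * Plain ZooKeeper sketch of the shutdown signal seen above: watch the
 * /hbase/running znode and treat its deletion as "cluster shutdown requested".
 * Not HBase's ZKWatcher.
 */
public class RunningZNodeWatchSketch {
    public static void main(String[] args) throws Exception {
        CountDownLatch shutdownSignalled = new CountDownLatch(1);

        ZooKeeper zk = new ZooKeeper("127.0.0.1:63688", 30_000, (WatchedEvent event) -> {
            if (event.getType() == Watcher.Event.EventType.NodeDeleted
                && "/hbase/running".equals(event.getPath())) {
                shutdownSignalled.countDown();
            }
        });

        // Register a watch even if the znode does not exist yet, mirroring
        // "Set watcher on znode that does not yet exist, /hbase/running".
        zk.exists("/hbase/running", true);

        shutdownSignalled.await();   // returns once the znode is deleted
        System.out.println("Cluster shutdown requested: /hbase/running was deleted");
        zk.close();
    }
}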
2024-11-17T22:47:40,378 DEBUG [RS:0;1a6e40b21a48:34025 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T22:47:40,378 DEBUG [RS:0;1a6e40b21a48:34025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:47:40,378 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 1430365f56faae0f30cec42339d5f233, disabling compactions & flushes 2024-11-17T22:47:40,378 INFO [RS:0;1a6e40b21a48:34025 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-17T22:47:40,378 INFO [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233. 2024-11-17T22:47:40,378 INFO [RS:0;1a6e40b21a48:34025 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-17T22:47:40,378 INFO [RS:0;1a6e40b21a48:34025 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-17T22:47:40,378 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233. 2024-11-17T22:47:40,378 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233. after waiting 0 ms 2024-11-17T22:47:40,378 INFO [RS:0;1a6e40b21a48:34025 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-17T22:47:40,378 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233. 
2024-11-17T22:47:40,379 INFO [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 1430365f56faae0f30cec42339d5f233 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-17T22:47:40,379 INFO [RS:0;1a6e40b21a48:34025 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-17T22:47:40,379 DEBUG [RS:0;1a6e40b21a48:34025 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 1430365f56faae0f30cec42339d5f233=TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233.} 2024-11-17T22:47:40,379 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T22:47:40,379 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T22:47:40,379 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T22:47:40,379 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T22:47:40,379 DEBUG [RS:0;1a6e40b21a48:34025 {}] regionserver.HRegionServer(1351): Waiting on 1430365f56faae0f30cec42339d5f233, 1588230740 2024-11-17T22:47:40,379 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T22:47:40,379 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-17T22:47:40,384 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/.tmp/info/e64aaeb6336e4b7d81c9490df40ebdd1 is 1080, key is row0029/info:/1731883654355/Put/seqid=0 2024-11-17T22:47:40,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741850_1026 (size=8193) 2024-11-17T22:47:40,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741850_1026 (size=8193) 2024-11-17T22:47:40,396 INFO [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/.tmp/info/e64aaeb6336e4b7d81c9490df40ebdd1 2024-11-17T22:47:40,401 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/hbase/meta/1588230740/.tmp/info/b4ba7cbbbaeb4a19953ca7a79e712c32 is 195, key is 
TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233./info:regioninfo/1731883568198/Put/seqid=0 2024-11-17T22:47:40,406 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/.tmp/info/e64aaeb6336e4b7d81c9490df40ebdd1 as hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/info/e64aaeb6336e4b7d81c9490df40ebdd1 2024-11-17T22:47:40,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741851_1027 (size=7016) 2024-11-17T22:47:40,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741851_1027 (size=7016) 2024-11-17T22:47:40,408 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/hbase/meta/1588230740/.tmp/info/b4ba7cbbbaeb4a19953ca7a79e712c32 2024-11-17T22:47:40,416 INFO [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/info/e64aaeb6336e4b7d81c9490df40ebdd1, entries=3, sequenceid=48, filesize=8.0 K 2024-11-17T22:47:40,417 INFO [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1430365f56faae0f30cec42339d5f233 in 39ms, sequenceid=48, compaction requested=true 2024-11-17T22:47:40,418 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/info/df987a947faf428c80e9f3f6edf77d26, hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/info/d480e2a9a244493b85701c132584c9a2, hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/info/f60a8c4a749842d1a1765fe407cf3c85] to archive 2024-11-17T22:47:40,420 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-17T22:47:40,424 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/info/df987a947faf428c80e9f3f6edf77d26 to hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/archive/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/info/df987a947faf428c80e9f3f6edf77d26 2024-11-17T22:47:40,426 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/info/d480e2a9a244493b85701c132584c9a2 to hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/archive/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/info/d480e2a9a244493b85701c132584c9a2 2024-11-17T22:47:40,428 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/info/f60a8c4a749842d1a1765fe407cf3c85 to hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/archive/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/info/f60a8c4a749842d1a1765fe407cf3c85 2024-11-17T22:47:40,436 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/hbase/meta/1588230740/.tmp/ns/c6b6ad128e21426693761cbf274d8dec is 43, key is default/ns:d/1731883567429/Put/seqid=0 2024-11-17T22:47:40,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741852_1028 (size=5153) 2024-11-17T22:47:40,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741852_1028 (size=5153) 2024-11-17T22:47:40,438 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=1a6e40b21a48:42095 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-17T22:47:40,442 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [df987a947faf428c80e9f3f6edf77d26=12509, d480e2a9a244493b85701c132584c9a2=12509, f60a8c4a749842d1a1765fe407cf3c85=12509] 2024-11-17T22:47:40,443 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/hbase/meta/1588230740/.tmp/ns/c6b6ad128e21426693761cbf274d8dec 2024-11-17T22:47:40,448 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/default/TestLogRolling-testSlowSyncLogRolling/1430365f56faae0f30cec42339d5f233/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-17T22:47:40,450 INFO [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233. 2024-11-17T22:47:40,450 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 1430365f56faae0f30cec42339d5f233: Waiting for close lock at 1731883660378Running coprocessor pre-close hooks at 1731883660378Disabling compacts and flushes for region at 1731883660378Disabling writes for close at 1731883660378Obtaining lock to block concurrent updates at 1731883660379 (+1 ms)Preparing flush snapshotting stores in 1430365f56faae0f30cec42339d5f233 at 1731883660379Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731883660379Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233. at 1731883660380 (+1 ms)Flushing 1430365f56faae0f30cec42339d5f233/info: creating writer at 1731883660380Flushing 1430365f56faae0f30cec42339d5f233/info: appending metadata at 1731883660383 (+3 ms)Flushing 1430365f56faae0f30cec42339d5f233/info: closing flushed file at 1731883660384 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@24cd8524: reopening flushed file at 1731883660405 (+21 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1430365f56faae0f30cec42339d5f233 in 39ms, sequenceid=48, compaction requested=true at 1731883660417 (+12 ms)Writing region close event to WAL at 1731883660443 (+26 ms)Running coprocessor post-close hooks at 1731883660449 (+6 ms)Closed at 1731883660450 (+1 ms) 2024-11-17T22:47:40,451 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731883567708.1430365f56faae0f30cec42339d5f233. 
2024-11-17T22:47:40,466 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/hbase/meta/1588230740/.tmp/table/0a61fb42cd8a44449e404232473274b8 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731883568217/Put/seqid=0 2024-11-17T22:47:40,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741853_1029 (size=5396) 2024-11-17T22:47:40,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741853_1029 (size=5396) 2024-11-17T22:47:40,472 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/hbase/meta/1588230740/.tmp/table/0a61fb42cd8a44449e404232473274b8 2024-11-17T22:47:40,480 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/hbase/meta/1588230740/.tmp/info/b4ba7cbbbaeb4a19953ca7a79e712c32 as hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/hbase/meta/1588230740/info/b4ba7cbbbaeb4a19953ca7a79e712c32 2024-11-17T22:47:40,489 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/hbase/meta/1588230740/info/b4ba7cbbbaeb4a19953ca7a79e712c32, entries=10, sequenceid=11, filesize=6.9 K 2024-11-17T22:47:40,490 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/hbase/meta/1588230740/.tmp/ns/c6b6ad128e21426693761cbf274d8dec as hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/hbase/meta/1588230740/ns/c6b6ad128e21426693761cbf274d8dec 2024-11-17T22:47:40,499 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/hbase/meta/1588230740/ns/c6b6ad128e21426693761cbf274d8dec, entries=2, sequenceid=11, filesize=5.0 K 2024-11-17T22:47:40,501 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/hbase/meta/1588230740/.tmp/table/0a61fb42cd8a44449e404232473274b8 as hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/hbase/meta/1588230740/table/0a61fb42cd8a44449e404232473274b8 2024-11-17T22:47:40,508 INFO [regionserver/1a6e40b21a48:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T22:47:40,509 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/hbase/meta/1588230740/table/0a61fb42cd8a44449e404232473274b8, entries=2, sequenceid=11, filesize=5.3 K 2024-11-17T22:47:40,511 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 132ms, sequenceid=11, compaction requested=false 2024-11-17T22:47:40,516 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-17T22:47:40,517 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T22:47:40,517 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T22:47:40,517 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731883660379Running coprocessor pre-close hooks at 1731883660379Disabling compacts and flushes for region at 1731883660379Disabling writes for close at 1731883660379Obtaining lock to block concurrent updates at 1731883660379Preparing flush snapshotting stores in 1588230740 at 1731883660379Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731883660380 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731883660380Flushing 1588230740/info: creating writer at 1731883660381 (+1 ms)Flushing 1588230740/info: appending metadata at 1731883660400 (+19 ms)Flushing 1588230740/info: closing flushed file at 1731883660400Flushing 1588230740/ns: creating writer at 1731883660417 (+17 ms)Flushing 1588230740/ns: appending metadata at 1731883660435 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1731883660435Flushing 1588230740/table: creating writer at 1731883660452 (+17 ms)Flushing 1588230740/table: appending metadata at 1731883660466 (+14 ms)Flushing 1588230740/table: closing flushed file at 1731883660466Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@204e3cfb: reopening flushed file at 1731883660479 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@575a530b: reopening flushed file at 1731883660489 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@35aa2cc8: reopening flushed file at 1731883660500 (+11 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 132ms, sequenceid=11, compaction requested=false at 1731883660511 (+11 ms)Writing region close event to WAL at 1731883660512 (+1 ms)Running coprocessor post-close hooks at 1731883660517 (+5 ms)Closed at 1731883660517 2024-11-17T22:47:40,518 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-17T22:47:40,525 INFO [regionserver/1a6e40b21a48:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-17T22:47:40,525 INFO 
[regionserver/1a6e40b21a48:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-17T22:47:40,580 INFO [RS:0;1a6e40b21a48:34025 {}] regionserver.HRegionServer(976): stopping server 1a6e40b21a48,34025,1731883565240; all regions closed. 2024-11-17T22:47:40,583 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:40,583 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:40,583 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:40,584 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:40,584 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:40,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741834_1010 (size=3066) 2024-11-17T22:47:40,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741834_1010 (size=3066) 2024-11-17T22:47:40,593 DEBUG [RS:0;1a6e40b21a48:34025 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/oldWALs 2024-11-17T22:47:40,593 INFO [RS:0;1a6e40b21a48:34025 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1a6e40b21a48%2C34025%2C1731883565240.meta:.meta(num 1731883567246) 2024-11-17T22:47:40,594 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:40,594 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:40,594 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:40,594 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:40,594 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:40,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741847_1023 (size=12695) 2024-11-17T22:47:40,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741847_1023 (size=12695) 2024-11-17T22:47:40,600 DEBUG [RS:0;1a6e40b21a48:34025 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/oldWALs 2024-11-17T22:47:40,600 INFO [RS:0;1a6e40b21a48:34025 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1a6e40b21a48%2C34025%2C1731883565240:(num 1731883640315) 2024-11-17T22:47:40,600 DEBUG [RS:0;1a6e40b21a48:34025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:47:40,600 INFO [RS:0;1a6e40b21a48:34025 {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T22:47:40,601 INFO [RS:0;1a6e40b21a48:34025 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T22:47:40,601 INFO [RS:0;1a6e40b21a48:34025 {}] hbase.ChoreService(370): Chore service for: regionserver/1a6e40b21a48:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-17T22:47:40,601 INFO [RS:0;1a6e40b21a48:34025 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T22:47:40,601 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-17T22:47:40,602 INFO [RS:0;1a6e40b21a48:34025 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34025 2024-11-17T22:47:40,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42095-0x1004fdd569d0000, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T22:47:40,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34025-0x1004fdd569d0001, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1a6e40b21a48,34025,1731883565240 2024-11-17T22:47:40,605 INFO [RS:0;1a6e40b21a48:34025 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T22:47:40,606 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1a6e40b21a48,34025,1731883565240] 2024-11-17T22:47:40,607 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1a6e40b21a48,34025,1731883565240 already deleted, retry=false 2024-11-17T22:47:40,607 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1a6e40b21a48,34025,1731883565240 expired; onlineServers=0 2024-11-17T22:47:40,607 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '1a6e40b21a48,42095,1731883564467' ***** 2024-11-17T22:47:40,607 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-17T22:47:40,607 INFO [M:0;1a6e40b21a48:42095 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T22:47:40,607 INFO [M:0;1a6e40b21a48:42095 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T22:47:40,607 DEBUG [M:0;1a6e40b21a48:42095 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-17T22:47:40,608 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-17T22:47:40,608 DEBUG [M:0;1a6e40b21a48:42095 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-17T22:47:40,608 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.large.0-1731883566351 {}] cleaner.HFileCleaner(306): Exit Thread[master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.large.0-1731883566351,5,FailOnTimeoutGroup] 2024-11-17T22:47:40,608 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.small.0-1731883566358 {}] cleaner.HFileCleaner(306): Exit Thread[master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.small.0-1731883566358,5,FailOnTimeoutGroup] 2024-11-17T22:47:40,608 INFO [M:0;1a6e40b21a48:42095 {}] hbase.ChoreService(370): Chore service for: master/1a6e40b21a48:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-17T22:47:40,608 INFO [M:0;1a6e40b21a48:42095 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T22:47:40,608 DEBUG [M:0;1a6e40b21a48:42095 {}] master.HMaster(1795): Stopping service threads 2024-11-17T22:47:40,608 INFO [M:0;1a6e40b21a48:42095 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-17T22:47:40,608 INFO [M:0;1a6e40b21a48:42095 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T22:47:40,609 INFO [M:0;1a6e40b21a48:42095 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-17T22:47:40,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42095-0x1004fdd569d0000, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-17T22:47:40,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42095-0x1004fdd569d0000, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:47:40,609 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-17T22:47:40,609 DEBUG [M:0;1a6e40b21a48:42095 {}] zookeeper.ZKUtil(347): master:42095-0x1004fdd569d0000, quorum=127.0.0.1:63688, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-17T22:47:40,609 WARN [M:0;1a6e40b21a48:42095 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-17T22:47:40,610 INFO [M:0;1a6e40b21a48:42095 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/.lastflushedseqids 2024-11-17T22:47:40,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741854_1030 (size=130) 2024-11-17T22:47:40,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741854_1030 (size=130) 2024-11-17T22:47:40,621 INFO [M:0;1a6e40b21a48:42095 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-17T22:47:40,622 INFO [M:0;1a6e40b21a48:42095 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-17T22:47:40,622 DEBUG [M:0;1a6e40b21a48:42095 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T22:47:40,622 INFO [M:0;1a6e40b21a48:42095 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:47:40,622 DEBUG [M:0;1a6e40b21a48:42095 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:47:40,622 DEBUG [M:0;1a6e40b21a48:42095 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T22:47:40,622 DEBUG [M:0;1a6e40b21a48:42095 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-17T22:47:40,622 INFO [M:0;1a6e40b21a48:42095 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.04 KB heapSize=29.21 KB 2024-11-17T22:47:40,641 DEBUG [M:0;1a6e40b21a48:42095 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b27dd0f90dd4418bba32fb7c85c5e48c is 82, key is hbase:meta,,1/info:regioninfo/1731883567342/Put/seqid=0 2024-11-17T22:47:40,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741855_1031 (size=5672) 2024-11-17T22:47:40,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741855_1031 (size=5672) 2024-11-17T22:47:40,648 INFO [M:0;1a6e40b21a48:42095 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b27dd0f90dd4418bba32fb7c85c5e48c 2024-11-17T22:47:40,671 DEBUG [M:0;1a6e40b21a48:42095 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ea10b6a9c7264d8fb693647b8999fdef is 767, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731883568226/Put/seqid=0 2024-11-17T22:47:40,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741856_1032 (size=6248) 2024-11-17T22:47:40,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741856_1032 (size=6248) 2024-11-17T22:47:40,677 INFO [M:0;1a6e40b21a48:42095 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.43 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ea10b6a9c7264d8fb693647b8999fdef 2024-11-17T22:47:40,683 INFO [M:0;1a6e40b21a48:42095 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ea10b6a9c7264d8fb693647b8999fdef 2024-11-17T22:47:40,700 DEBUG [M:0;1a6e40b21a48:42095 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e72ae12a8291455493ce1fc69a5b9140 is 69, key is 1a6e40b21a48,34025,1731883565240/rs:state/1731883566398/Put/seqid=0 2024-11-17T22:47:40,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741857_1033 (size=5156) 2024-11-17T22:47:40,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741857_1033 (size=5156) 2024-11-17T22:47:40,706 INFO [M:0;1a6e40b21a48:42095 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), 
to=hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e72ae12a8291455493ce1fc69a5b9140 2024-11-17T22:47:40,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34025-0x1004fdd569d0001, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T22:47:40,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34025-0x1004fdd569d0001, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T22:47:40,707 INFO [RS:0;1a6e40b21a48:34025 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T22:47:40,708 INFO [RS:0;1a6e40b21a48:34025 {}] regionserver.HRegionServer(1031): Exiting; stopping=1a6e40b21a48,34025,1731883565240; zookeeper connection closed. 2024-11-17T22:47:40,708 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6e3c587 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6e3c587 2024-11-17T22:47:40,708 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-17T22:47:40,731 DEBUG [M:0;1a6e40b21a48:42095 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4a0c69c312834365a2bbd5a856a9e48e is 52, key is load_balancer_on/state:d/1731883567677/Put/seqid=0 2024-11-17T22:47:40,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741858_1034 (size=5056) 2024-11-17T22:47:40,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741858_1034 (size=5056) 2024-11-17T22:47:40,738 INFO [M:0;1a6e40b21a48:42095 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4a0c69c312834365a2bbd5a856a9e48e 2024-11-17T22:47:40,745 DEBUG [M:0;1a6e40b21a48:42095 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b27dd0f90dd4418bba32fb7c85c5e48c as hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b27dd0f90dd4418bba32fb7c85c5e48c 2024-11-17T22:47:40,752 INFO [M:0;1a6e40b21a48:42095 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b27dd0f90dd4418bba32fb7c85c5e48c, entries=8, sequenceid=59, filesize=5.5 K 2024-11-17T22:47:40,753 DEBUG [M:0;1a6e40b21a48:42095 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ea10b6a9c7264d8fb693647b8999fdef as 
hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ea10b6a9c7264d8fb693647b8999fdef 2024-11-17T22:47:40,760 INFO [M:0;1a6e40b21a48:42095 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ea10b6a9c7264d8fb693647b8999fdef 2024-11-17T22:47:40,760 INFO [M:0;1a6e40b21a48:42095 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ea10b6a9c7264d8fb693647b8999fdef, entries=6, sequenceid=59, filesize=6.1 K 2024-11-17T22:47:40,761 DEBUG [M:0;1a6e40b21a48:42095 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e72ae12a8291455493ce1fc69a5b9140 as hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e72ae12a8291455493ce1fc69a5b9140 2024-11-17T22:47:40,767 INFO [M:0;1a6e40b21a48:42095 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e72ae12a8291455493ce1fc69a5b9140, entries=1, sequenceid=59, filesize=5.0 K 2024-11-17T22:47:40,768 DEBUG [M:0;1a6e40b21a48:42095 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4a0c69c312834365a2bbd5a856a9e48e as hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4a0c69c312834365a2bbd5a856a9e48e 2024-11-17T22:47:40,775 INFO [M:0;1a6e40b21a48:42095 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4a0c69c312834365a2bbd5a856a9e48e, entries=1, sequenceid=59, filesize=4.9 K 2024-11-17T22:47:40,776 INFO [M:0;1a6e40b21a48:42095 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 154ms, sequenceid=59, compaction requested=false 2024-11-17T22:47:40,777 INFO [M:0;1a6e40b21a48:42095 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:47:40,778 DEBUG [M:0;1a6e40b21a48:42095 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731883660622Disabling compacts and flushes for region at 1731883660622Disabling writes for close at 1731883660622Obtaining lock to block concurrent updates at 1731883660622Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731883660622Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23588, getHeapSize=29848, getOffHeapSize=0, getCellsCount=70 at 1731883660623 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731883660624 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731883660624Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731883660641 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731883660641Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731883660654 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731883660670 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731883660670Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731883660684 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731883660699 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731883660699Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731883660714 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731883660730 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731883660731 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@77a917ba: reopening flushed file at 1731883660744 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1c98fceb: reopening flushed file at 1731883660752 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@13a5291b: reopening flushed file at 1731883660760 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@40b824c1: reopening flushed file at 1731883660767 (+7 ms)Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 154ms, sequenceid=59, compaction requested=false at 1731883660776 (+9 ms)Writing region close event to WAL at 1731883660777 (+1 ms)Closed at 1731883660777 2024-11-17T22:47:40,778 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:40,779 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:40,779 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:40,779 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:40,779 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:40,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43717 is added to blk_1073741830_1006 (size=27985) 2024-11-17T22:47:40,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37031 is added to blk_1073741830_1006 (size=27985) 2024-11-17T22:47:40,782 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-17T22:47:40,782 INFO [M:0;1a6e40b21a48:42095 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-17T22:47:40,782 INFO [M:0;1a6e40b21a48:42095 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42095 2024-11-17T22:47:40,782 INFO [M:0;1a6e40b21a48:42095 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T22:47:40,884 INFO [M:0;1a6e40b21a48:42095 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T22:47:40,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42095-0x1004fdd569d0000, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T22:47:40,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42095-0x1004fdd569d0000, quorum=127.0.0.1:63688, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T22:47:40,888 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1467625d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:47:40,890 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@675921ed{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T22:47:40,890 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T22:47:40,890 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ec7bf2e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T22:47:40,891 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3369fbc0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/hadoop.log.dir/,STOPPED} 2024-11-17T22:47:40,893 WARN [BP-1157280779-172.17.0.2-1731883561327 heartbeating to localhost/127.0.0.1:40071 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T22:47:40,893 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T22:47:40,893 WARN [BP-1157280779-172.17.0.2-1731883561327 heartbeating to localhost/127.0.0.1:40071 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1157280779-172.17.0.2-1731883561327 (Datanode Uuid b6c6a6f3-efec-4b27-9acc-82e43e164a85) service to localhost/127.0.0.1:40071 2024-11-17T22:47:40,893 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T22:47:40,895 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/cluster_d3b06ad8-e144-68c8-0018-2b0b8315e99f/data/data3/current/BP-1157280779-172.17.0.2-1731883561327 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:47:40,895 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/cluster_d3b06ad8-e144-68c8-0018-2b0b8315e99f/data/data4/current/BP-1157280779-172.17.0.2-1731883561327 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:47:40,895 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T22:47:40,898 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6c2fdbac{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:47:40,898 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@461c65fd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T22:47:40,898 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T22:47:40,898 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60d13ec7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T22:47:40,898 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6355b7f5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/hadoop.log.dir/,STOPPED} 2024-11-17T22:47:40,900 WARN [BP-1157280779-172.17.0.2-1731883561327 heartbeating to localhost/127.0.0.1:40071 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T22:47:40,900 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T22:47:40,900 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T22:47:40,900 WARN [BP-1157280779-172.17.0.2-1731883561327 heartbeating to localhost/127.0.0.1:40071 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1157280779-172.17.0.2-1731883561327 (Datanode Uuid c098af76-f143-4cba-acce-c27b646b038b) service to localhost/127.0.0.1:40071 2024-11-17T22:47:40,901 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/cluster_d3b06ad8-e144-68c8-0018-2b0b8315e99f/data/data1/current/BP-1157280779-172.17.0.2-1731883561327 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:47:40,901 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/cluster_d3b06ad8-e144-68c8-0018-2b0b8315e99f/data/data2/current/BP-1157280779-172.17.0.2-1731883561327 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:47:40,901 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T22:47:40,910 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T22:47:40,911 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T22:47:40,911 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T22:47:40,911 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T22:47:40,912 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/hadoop.log.dir/,STOPPED} 2024-11-17T22:47:40,923 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-17T22:47:40,955 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-17T22:47:40,966 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:40071 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40071 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: master/1a6e40b21a48:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially 
hanging thread: org.apache.hadoop.hdfs.PeerCache@7a3d4a7b java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:40071 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:40071 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) 
java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:40071 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:40071 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/1a6e40b21a48:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40071 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40071 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/1a6e40b21a48:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) - Thread LEAK? -, OpenFileDescriptor=408 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=145 (was 318), ProcessCount=11 (was 11), AvailableMemoryMB=4910 (was 5502) 2024-11-17T22:47:40,973 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=408, MaxFileDescriptor=1048576, SystemLoadAverage=145, ProcessCount=11, AvailableMemoryMB=4909 2024-11-17T22:47:40,973 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-17T22:47:40,974 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/hadoop.log.dir so I do NOT create it in target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b 2024-11-17T22:47:40,974 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c24b04fe-6de1-5f4b-6e3c-f779d61004f4/hadoop.tmp.dir so I do NOT create it in target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b 2024-11-17T22:47:40,974 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/cluster_60482763-82b9-7eff-dac3-45c1d226e09b, deleteOnExit=true 2024-11-17T22:47:40,974 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-17T22:47:40,974 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/test.cache.data in system properties and HBase conf 2024-11-17T22:47:40,974 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/hadoop.tmp.dir in system properties and HBase conf 2024-11-17T22:47:40,974 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/hadoop.log.dir in system properties and HBase conf 2024-11-17T22:47:40,975 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-17T22:47:40,975 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-17T22:47:40,975 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-17T22:47:40,975 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-17T22:47:40,975 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-17T22:47:40,975 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-17T22:47:40,975 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-17T22:47:40,975 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T22:47:40,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-17T22:47:40,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-17T22:47:40,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T22:47:40,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T22:47:40,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-17T22:47:40,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/nfs.dump.dir in system properties and HBase conf 2024-11-17T22:47:40,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/java.io.tmpdir in system properties and HBase conf 2024-11-17T22:47:40,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T22:47:40,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-17T22:47:40,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-17T22:47:40,991 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T22:47:41,050 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:47:41,061 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T22:47:41,062 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T22:47:41,062 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T22:47:41,062 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T22:47:41,064 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:47:41,064 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a3c3ceb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/hadoop.log.dir/,AVAILABLE} 2024-11-17T22:47:41,065 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a69944b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T22:47:41,161 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3235d5ba{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/java.io.tmpdir/jetty-localhost-44053-hadoop-hdfs-3_4_1-tests_jar-_-any-3266218850669410686/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T22:47:41,162 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@347af0d{HTTP/1.1, (http/1.1)}{localhost:44053} 2024-11-17T22:47:41,162 INFO [Time-limited test {}] server.Server(415): Started @102186ms 2024-11-17T22:47:41,174 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T22:47:41,226 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:47:41,230 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T22:47:41,230 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T22:47:41,230 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T22:47:41,231 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T22:47:41,231 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@607b9bc6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/hadoop.log.dir/,AVAILABLE} 2024-11-17T22:47:41,231 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@670e4080{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T22:47:41,327 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@61e52b83{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/java.io.tmpdir/jetty-localhost-37605-hadoop-hdfs-3_4_1-tests_jar-_-any-7749729191451598630/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:47:41,327 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@29a18ee0{HTTP/1.1, (http/1.1)}{localhost:37605} 2024-11-17T22:47:41,327 INFO [Time-limited test {}] server.Server(415): Started @102352ms 2024-11-17T22:47:41,329 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T22:47:41,367 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:47:41,371 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T22:47:41,371 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T22:47:41,371 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T22:47:41,371 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T22:47:41,372 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d944f53{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/hadoop.log.dir/,AVAILABLE} 2024-11-17T22:47:41,372 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18f27499{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T22:47:41,405 WARN [Thread-441 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/cluster_60482763-82b9-7eff-dac3-45c1d226e09b/data/data2/current/BP-1935376058-172.17.0.2-1731883661003/current, will proceed with Du for space computation calculation, 2024-11-17T22:47:41,405 WARN [Thread-440 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/cluster_60482763-82b9-7eff-dac3-45c1d226e09b/data/data1/current/BP-1935376058-172.17.0.2-1731883661003/current, will proceed with Du for space computation calculation, 2024-11-17T22:47:41,425 WARN [Thread-419 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T22:47:41,430 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x116b26c93ccf89c8 with lease ID 0x4e53c0904609feff: Processing first storage report for DS-422ece04-7720-4221-8356-81350f341b99 from datanode DatanodeRegistration(127.0.0.1:39137, datanodeUuid=45a4c0b5-8a22-4d6a-a187-5a3805f7db6f, infoPort=42013, infoSecurePort=0, ipcPort=37877, storageInfo=lv=-57;cid=testClusterID;nsid=862365425;c=1731883661003) 2024-11-17T22:47:41,430 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x116b26c93ccf89c8 with lease ID 0x4e53c0904609feff: from storage DS-422ece04-7720-4221-8356-81350f341b99 node DatanodeRegistration(127.0.0.1:39137, datanodeUuid=45a4c0b5-8a22-4d6a-a187-5a3805f7db6f, infoPort=42013, infoSecurePort=0, ipcPort=37877, storageInfo=lv=-57;cid=testClusterID;nsid=862365425;c=1731883661003), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:47:41,430 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x116b26c93ccf89c8 with lease ID 0x4e53c0904609feff: Processing first storage report for DS-5f1414d3-38c6-44ae-9968-92e2c87f377d from datanode DatanodeRegistration(127.0.0.1:39137, datanodeUuid=45a4c0b5-8a22-4d6a-a187-5a3805f7db6f, infoPort=42013, infoSecurePort=0, ipcPort=37877, storageInfo=lv=-57;cid=testClusterID;nsid=862365425;c=1731883661003) 2024-11-17T22:47:41,430 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x116b26c93ccf89c8 with lease ID 0x4e53c0904609feff: from storage DS-5f1414d3-38c6-44ae-9968-92e2c87f377d node DatanodeRegistration(127.0.0.1:39137, datanodeUuid=45a4c0b5-8a22-4d6a-a187-5a3805f7db6f, infoPort=42013, infoSecurePort=0, ipcPort=37877, storageInfo=lv=-57;cid=testClusterID;nsid=862365425;c=1731883661003), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:47:41,471 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7cebd4b6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/java.io.tmpdir/jetty-localhost-32791-hadoop-hdfs-3_4_1-tests_jar-_-any-17661134749087248043/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:47:41,472 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1e6ff2f3{HTTP/1.1, (http/1.1)}{localhost:32791} 2024-11-17T22:47:41,472 INFO [Time-limited test {}] server.Server(415): Started @102496ms 2024-11-17T22:47:41,473 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-17T22:47:41,534 WARN [Thread-467 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/cluster_60482763-82b9-7eff-dac3-45c1d226e09b/data/data4/current/BP-1935376058-172.17.0.2-1731883661003/current, will proceed with Du for space computation calculation, 2024-11-17T22:47:41,534 WARN [Thread-466 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/cluster_60482763-82b9-7eff-dac3-45c1d226e09b/data/data3/current/BP-1935376058-172.17.0.2-1731883661003/current, will proceed with Du for space computation calculation, 2024-11-17T22:47:41,556 WARN [Thread-455 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-17T22:47:41,559 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd571941fb02e9e6f with lease ID 0x4e53c0904609ff00: Processing first storage report for DS-fe5caec3-5833-4678-8ee4-72c5318ab596 from datanode DatanodeRegistration(127.0.0.1:33673, datanodeUuid=d8807b53-c8aa-48d0-8017-083a7178aa2c, infoPort=39161, infoSecurePort=0, ipcPort=40469, storageInfo=lv=-57;cid=testClusterID;nsid=862365425;c=1731883661003) 2024-11-17T22:47:41,559 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd571941fb02e9e6f with lease ID 0x4e53c0904609ff00: from storage DS-fe5caec3-5833-4678-8ee4-72c5318ab596 node DatanodeRegistration(127.0.0.1:33673, datanodeUuid=d8807b53-c8aa-48d0-8017-083a7178aa2c, infoPort=39161, infoSecurePort=0, ipcPort=40469, storageInfo=lv=-57;cid=testClusterID;nsid=862365425;c=1731883661003), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:47:41,559 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd571941fb02e9e6f with lease ID 0x4e53c0904609ff00: Processing first storage report for DS-b8bb7512-dc1d-47cb-a839-79b818331dea from datanode DatanodeRegistration(127.0.0.1:33673, datanodeUuid=d8807b53-c8aa-48d0-8017-083a7178aa2c, infoPort=39161, infoSecurePort=0, ipcPort=40469, storageInfo=lv=-57;cid=testClusterID;nsid=862365425;c=1731883661003) 2024-11-17T22:47:41,559 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd571941fb02e9e6f with lease ID 0x4e53c0904609ff00: from storage DS-b8bb7512-dc1d-47cb-a839-79b818331dea node DatanodeRegistration(127.0.0.1:33673, datanodeUuid=d8807b53-c8aa-48d0-8017-083a7178aa2c, infoPort=39161, infoSecurePort=0, ipcPort=40469, storageInfo=lv=-57;cid=testClusterID;nsid=862365425;c=1731883661003), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:47:41,600 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b 2024-11-17T22:47:41,610 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/cluster_60482763-82b9-7eff-dac3-45c1d226e09b/zookeeper_0, clientPort=52293, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/cluster_60482763-82b9-7eff-dac3-45c1d226e09b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/cluster_60482763-82b9-7eff-dac3-45c1d226e09b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-17T22:47:41,611 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52293 2024-11-17T22:47:41,611 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:47:41,613 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:47:41,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33673 is added to blk_1073741825_1001 (size=7) 2024-11-17T22:47:41,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39137 is added to blk_1073741825_1001 (size=7) 2024-11-17T22:47:41,627 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010 with version=8 2024-11-17T22:47:41,627 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/hbase-staging 2024-11-17T22:47:41,630 INFO [Time-limited test {}] client.ConnectionUtils(128): master/1a6e40b21a48:0 server-side Connection retries=45 2024-11-17T22:47:41,630 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T22:47:41,630 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T22:47:41,630 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T22:47:41,630 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T22:47:41,630 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T22:47:41,630 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-17T22:47:41,630 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T22:47:41,631 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40143 2024-11-17T22:47:41,633 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40143 connecting to ZooKeeper ensemble=127.0.0.1:52293 2024-11-17T22:47:41,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:401430x0, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T22:47:41,638 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40143-0x1004fded5940000 connected 2024-11-17T22:47:41,659 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:47:41,661 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:47:41,664 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40143-0x1004fded5940000, quorum=127.0.0.1:52293, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T22:47:41,664 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010, hbase.cluster.distributed=false 2024-11-17T22:47:41,666 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40143-0x1004fded5940000, quorum=127.0.0.1:52293, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T22:47:41,669 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40143 2024-11-17T22:47:41,669 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40143 2024-11-17T22:47:41,670 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40143 2024-11-17T22:47:41,671 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40143 2024-11-17T22:47:41,672 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40143 2024-11-17T22:47:41,687 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1a6e40b21a48:0 server-side Connection retries=45 2024-11-17T22:47:41,687 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T22:47:41,687 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T22:47:41,687 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T22:47:41,687 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T22:47:41,687 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T22:47:41,687 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-17T22:47:41,688 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T22:47:41,689 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35021 2024-11-17T22:47:41,691 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35021 connecting to ZooKeeper ensemble=127.0.0.1:52293 2024-11-17T22:47:41,691 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:47:41,694 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:47:41,698 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:350210x0, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T22:47:41,698 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35021-0x1004fded5940001, quorum=127.0.0.1:52293, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T22:47:41,698 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35021-0x1004fded5940001 connected 2024-11-17T22:47:41,699 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-17T22:47:41,699 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-17T22:47:41,700 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35021-0x1004fded5940001, quorum=127.0.0.1:52293, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-17T22:47:41,701 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35021-0x1004fded5940001, quorum=127.0.0.1:52293, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T22:47:41,709 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35021 2024-11-17T22:47:41,709 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35021 2024-11-17T22:47:41,711 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35021 2024-11-17T22:47:41,712 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35021 2024-11-17T22:47:41,712 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35021 
2024-11-17T22:47:41,730 DEBUG [M:0;1a6e40b21a48:40143 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;1a6e40b21a48:40143 2024-11-17T22:47:41,732 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/1a6e40b21a48,40143,1731883661629 2024-11-17T22:47:41,733 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35021-0x1004fded5940001, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T22:47:41,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x1004fded5940000, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T22:47:41,734 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40143-0x1004fded5940000, quorum=127.0.0.1:52293, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/1a6e40b21a48,40143,1731883661629 2024-11-17T22:47:41,735 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35021-0x1004fded5940001, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-17T22:47:41,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x1004fded5940000, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:47:41,735 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35021-0x1004fded5940001, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:47:41,736 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40143-0x1004fded5940000, quorum=127.0.0.1:52293, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-17T22:47:41,736 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/1a6e40b21a48,40143,1731883661629 from backup master directory 2024-11-17T22:47:41,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x1004fded5940000, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/1a6e40b21a48,40143,1731883661629 2024-11-17T22:47:41,738 WARN [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-17T22:47:41,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x1004fded5940000, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T22:47:41,738 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=1a6e40b21a48,40143,1731883661629 2024-11-17T22:47:41,738 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35021-0x1004fded5940001, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T22:47:41,744 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/hbase.id] with ID: 05f1f4bb-5e6a-4550-b45e-68ea8933d125 2024-11-17T22:47:41,744 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/.tmp/hbase.id 2024-11-17T22:47:41,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39137 is added to blk_1073741826_1002 (size=42) 2024-11-17T22:47:41,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33673 is added to blk_1073741826_1002 (size=42) 2024-11-17T22:47:41,756 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/.tmp/hbase.id]:[hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/hbase.id] 2024-11-17T22:47:41,772 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:47:41,772 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-17T22:47:41,775 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-17T22:47:41,776 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35021-0x1004fded5940001, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:47:41,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x1004fded5940000, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:47:41,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33673 is added to blk_1073741827_1003 (size=196) 2024-11-17T22:47:41,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39137 is added to blk_1073741827_1003 (size=196) 2024-11-17T22:47:42,193 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T22:47:42,197 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-17T22:47:42,197 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T22:47:42,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33673 is added to blk_1073741828_1004 (size=1189) 2024-11-17T22:47:42,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39137 is added to blk_1073741828_1004 (size=1189) 2024-11-17T22:47:42,212 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/MasterData/data/master/store 2024-11-17T22:47:42,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39137 is added to blk_1073741829_1005 (size=34) 2024-11-17T22:47:42,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33673 is added to blk_1073741829_1005 (size=34) 2024-11-17T22:47:42,220 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:47:42,220 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T22:47:42,221 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:47:42,221 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:47:42,221 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T22:47:42,221 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:47:42,221 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-17T22:47:42,221 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731883662220Disabling compacts and flushes for region at 1731883662220Disabling writes for close at 1731883662221 (+1 ms)Writing region close event to WAL at 1731883662221Closed at 1731883662221 2024-11-17T22:47:42,223 WARN [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/MasterData/data/master/store/.initializing 2024-11-17T22:47:42,223 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/MasterData/WALs/1a6e40b21a48,40143,1731883661629 2024-11-17T22:47:42,227 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a6e40b21a48%2C40143%2C1731883661629, suffix=, logDir=hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/MasterData/WALs/1a6e40b21a48,40143,1731883661629, archiveDir=hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/MasterData/oldWALs, maxLogs=10 2024-11-17T22:47:42,227 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C40143%2C1731883661629.1731883662227 2024-11-17T22:47:42,233 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/MasterData/WALs/1a6e40b21a48,40143,1731883661629/1a6e40b21a48%2C40143%2C1731883661629.1731883662227 2024-11-17T22:47:42,234 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39161:39161),(127.0.0.1/127.0.0.1:42013:42013)] 2024-11-17T22:47:42,235 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-17T22:47:42,235 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:47:42,235 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:47:42,235 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:47:42,237 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:47:42,239 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-17T22:47:42,239 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:47:42,240 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:47:42,240 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:47:42,242 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-17T22:47:42,242 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:47:42,243 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T22:47:42,243 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:47:42,246 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-17T22:47:42,247 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:47:42,247 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T22:47:42,248 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:47:42,249 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-17T22:47:42,249 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:47:42,250 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T22:47:42,250 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:47:42,251 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:47:42,252 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:47:42,253 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:47:42,253 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:47:42,254 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-17T22:47:42,255 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:47:42,257 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T22:47:42,258 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=795536, jitterRate=0.011577457189559937}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-17T22:47:42,259 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731883662235Initializing all the Stores at 1731883662236 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883662236Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883662237 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883662237Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883662237Cleaning up temporary data from old regions at 1731883662253 (+16 ms)Region opened successfully at 1731883662259 (+6 ms) 2024-11-17T22:47:42,259 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-17T22:47:42,263 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@132a228e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a6e40b21a48/172.17.0.2:0 2024-11-17T22:47:42,264 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-17T22:47:42,264 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-17T22:47:42,264 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-17T22:47:42,264 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-17T22:47:42,265 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-17T22:47:42,266 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-17T22:47:42,266 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-17T22:47:42,268 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-17T22:47:42,269 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40143-0x1004fded5940000, quorum=127.0.0.1:52293, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-17T22:47:42,270 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-17T22:47:42,270 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-17T22:47:42,271 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40143-0x1004fded5940000, quorum=127.0.0.1:52293, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-17T22:47:42,272 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-17T22:47:42,272 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-17T22:47:42,273 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40143-0x1004fded5940000, quorum=127.0.0.1:52293, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-17T22:47:42,274 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-17T22:47:42,275 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40143-0x1004fded5940000, quorum=127.0.0.1:52293, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-17T22:47:42,276 DEBUG 
[master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-17T22:47:42,278 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40143-0x1004fded5940000, quorum=127.0.0.1:52293, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-17T22:47:42,279 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-17T22:47:42,280 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35021-0x1004fded5940001, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T22:47:42,280 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x1004fded5940000, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T22:47:42,280 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35021-0x1004fded5940001, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:47:42,280 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x1004fded5940000, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:47:42,281 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=1a6e40b21a48,40143,1731883661629, sessionid=0x1004fded5940000, setting cluster-up flag (Was=false) 2024-11-17T22:47:42,282 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35021-0x1004fded5940001, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:47:42,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x1004fded5940000, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:47:42,285 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-17T22:47:42,286 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1a6e40b21a48,40143,1731883661629 2024-11-17T22:47:42,288 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35021-0x1004fded5940001, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:47:42,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x1004fded5940000, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:47:42,291 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-17T22:47:42,292 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1a6e40b21a48,40143,1731883661629 2024-11-17T22:47:42,293 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-17T22:47:42,295 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-17T22:47:42,296 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-17T22:47:42,296 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-17T22:47:42,296 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 1a6e40b21a48,40143,1731883661629 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-17T22:47:42,298 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/1a6e40b21a48:0, corePoolSize=5, maxPoolSize=5 2024-11-17T22:47:42,298 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/1a6e40b21a48:0, corePoolSize=5, maxPoolSize=5 2024-11-17T22:47:42,298 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=5, maxPoolSize=5 2024-11-17T22:47:42,298 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=5, maxPoolSize=5 2024-11-17T22:47:42,298 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/1a6e40b21a48:0, corePoolSize=10, maxPoolSize=10 2024-11-17T22:47:42,298 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:47:42,298 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=2, maxPoolSize=2 2024-11-17T22:47:42,298 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=1, 
maxPoolSize=1 2024-11-17T22:47:42,299 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731883692299 2024-11-17T22:47:42,299 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-17T22:47:42,299 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-17T22:47:42,299 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-17T22:47:42,299 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-17T22:47:42,299 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-17T22:47:42,300 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-17T22:47:42,300 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:42,300 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-17T22:47:42,300 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T22:47:42,300 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-17T22:47:42,300 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-17T22:47:42,300 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-17T22:47:42,301 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-17T22:47:42,301 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-17T22:47:42,301 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.large.0-1731883662301,5,FailOnTimeoutGroup] 2024-11-17T22:47:42,301 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.small.0-1731883662301,5,FailOnTimeoutGroup] 2024-11-17T22:47:42,301 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:42,301 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-17T22:47:42,301 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:42,301 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:42,302 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:47:42,302 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-17T22:47:42,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39137 is added to blk_1073741831_1007 (size=1321) 2024-11-17T22:47:42,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33673 is added to blk_1073741831_1007 (size=1321) 2024-11-17T22:47:42,313 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-17T22:47:42,313 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010 2024-11-17T22:47:42,315 INFO [RS:0;1a6e40b21a48:35021 {}] regionserver.HRegionServer(746): ClusterId : 05f1f4bb-5e6a-4550-b45e-68ea8933d125 2024-11-17T22:47:42,315 DEBUG [RS:0;1a6e40b21a48:35021 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-17T22:47:42,318 DEBUG [RS:0;1a6e40b21a48:35021 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-17T22:47:42,318 DEBUG [RS:0;1a6e40b21a48:35021 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-17T22:47:42,320 DEBUG [RS:0;1a6e40b21a48:35021 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-17T22:47:42,321 DEBUG [RS:0;1a6e40b21a48:35021 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f4e1f25, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a6e40b21a48/172.17.0.2:0 2024-11-17T22:47:42,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33673 is added to blk_1073741832_1008 (size=32) 2024-11-17T22:47:42,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39137 is added to blk_1073741832_1008 (size=32) 2024-11-17T22:47:42,325 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:47:42,326 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T22:47:42,328 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T22:47:42,328 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:47:42,329 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:47:42,329 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T22:47:42,330 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T22:47:42,330 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:47:42,331 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:47:42,331 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T22:47:42,332 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T22:47:42,332 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:47:42,333 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:47:42,333 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T22:47:42,334 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T22:47:42,335 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:47:42,335 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:47:42,335 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T22:47:42,336 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/data/hbase/meta/1588230740 2024-11-17T22:47:42,336 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/data/hbase/meta/1588230740 2024-11-17T22:47:42,337 DEBUG [RS:0;1a6e40b21a48:35021 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;1a6e40b21a48:35021 2024-11-17T22:47:42,337 INFO [RS:0;1a6e40b21a48:35021 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-17T22:47:42,337 INFO [RS:0;1a6e40b21a48:35021 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-17T22:47:42,337 DEBUG [RS:0;1a6e40b21a48:35021 {}] regionserver.HRegionServer(832): About to register with Master. 
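Annotation: the long {TABLE_ATTRIBUTES => ...}, {NAME => 'info', BLOOMFILTER => 'ROWCOL', ...} dumps above are HBase's toString() rendering of a TableDescriptor and its column families. For reference, a minimal sketch (not taken from this test) of building a comparable descriptor with the public client API; the table name "example" and the single "info" family here are illustrative placeholders, while the attribute values mirror the ones printed for hbase:meta.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  public static void main(String[] args) {
    // Column family mirroring the attributes printed for the 'info' family above:
    // ROWCOL bloom filter, ROW_INDEX_V1 block encoding, in-memory, 8 KB blocks, 3 versions.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .setMaxVersions(3)
        .build();

    TableDescriptor table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example"))   // illustrative table name
        .setColumnFamily(info)
        .build();

    // toString() prints the same {NAME => ..., BLOOMFILTER => ...} form seen in the log.
    System.out.println(table);
  }
}
```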
2024-11-17T22:47:42,338 INFO [RS:0;1a6e40b21a48:35021 {}] regionserver.HRegionServer(2659): reportForDuty to master=1a6e40b21a48,40143,1731883661629 with port=35021, startcode=1731883661687 2024-11-17T22:47:42,338 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T22:47:42,338 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T22:47:42,338 DEBUG [RS:0;1a6e40b21a48:35021 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-17T22:47:42,338 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-17T22:47:42,340 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T22:47:42,340 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50481, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-17T22:47:42,341 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40143 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1a6e40b21a48,35021,1731883661687 2024-11-17T22:47:42,341 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40143 {}] master.ServerManager(517): Registering regionserver=1a6e40b21a48,35021,1731883661687 2024-11-17T22:47:42,343 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T22:47:42,343 DEBUG [RS:0;1a6e40b21a48:35021 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010 2024-11-17T22:47:42,343 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=797279, jitterRate=0.013793215155601501}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T22:47:42,343 DEBUG [RS:0;1a6e40b21a48:35021 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44627 2024-11-17T22:47:42,343 DEBUG [RS:0;1a6e40b21a48:35021 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-17T22:47:42,344 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731883662325Initializing all the Stores at 1731883662326 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883662326Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '8192 B (8KB)'} at 1731883662326Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883662326Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883662326Cleaning up temporary data from old regions at 1731883662338 (+12 ms)Region opened successfully at 1731883662344 (+6 ms) 2024-11-17T22:47:42,344 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T22:47:42,344 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T22:47:42,344 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T22:47:42,344 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T22:47:42,344 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T22:47:42,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x1004fded5940000, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T22:47:42,345 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T22:47:42,345 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731883662344Disabling compacts and flushes for region at 1731883662344Disabling writes for close at 1731883662344Writing region close event to WAL at 1731883662345 (+1 ms)Closed at 1731883662345 2024-11-17T22:47:42,345 DEBUG [RS:0;1a6e40b21a48:35021 {}] zookeeper.ZKUtil(111): regionserver:35021-0x1004fded5940001, quorum=127.0.0.1:52293, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1a6e40b21a48,35021,1731883661687 2024-11-17T22:47:42,345 WARN [RS:0;1a6e40b21a48:35021 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
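Annotation: the ZKWatcher/ZKUtil lines above record watches firing on znodes such as /hbase/running, /hbase/rs and /hbase/meta-region-server. A minimal sketch of the same watch mechanism using the raw Apache ZooKeeper client (not HBase's internal ZKUtil/ZKWatcher); the connect string and the watched path are placeholders, not values from this log.

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class ZkWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    // Default watcher: log session and node events, much like ZKWatcher does above.
    Watcher watcher = (WatchedEvent event) -> {
      System.out.println("Received ZooKeeper Event, type=" + event.getType()
          + ", state=" + event.getState() + ", path=" + event.getPath());
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    };
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, watcher); // placeholder quorum
    connected.await();
    try {
      // exists() with watch=true registers interest, so a later NodeCreated/NodeDeleted
      // on this path is delivered to the watcher, as seen for /hbase/running above.
      Stat stat = zk.exists("/hbase/running", true);
      System.out.println(stat == null ? "znode absent, watch set" : "znode present");
    } finally {
      zk.close();
    }
  }
}
```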
2024-11-17T22:47:42,345 INFO [RS:0;1a6e40b21a48:35021 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T22:47:42,346 DEBUG [RS:0;1a6e40b21a48:35021 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/WALs/1a6e40b21a48,35021,1731883661687 2024-11-17T22:47:42,346 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1a6e40b21a48,35021,1731883661687] 2024-11-17T22:47:42,346 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T22:47:42,347 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-17T22:47:42,347 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-17T22:47:42,348 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T22:47:42,349 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-17T22:47:42,356 INFO [RS:0;1a6e40b21a48:35021 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-17T22:47:42,358 INFO [RS:0;1a6e40b21a48:35021 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-17T22:47:42,360 INFO [RS:0;1a6e40b21a48:35021 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-17T22:47:42,360 INFO [RS:0;1a6e40b21a48:35021 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:42,361 INFO [RS:0;1a6e40b21a48:35021 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-17T22:47:42,362 INFO [RS:0;1a6e40b21a48:35021 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-17T22:47:42,362 INFO [RS:0;1a6e40b21a48:35021 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
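Annotation: each "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." line above comes from ChoreService scheduling a periodic task. A minimal sketch of the same mechanism with a custom chore; note that ScheduledChore and ChoreService are HBase-internal classes, so this is illustrative only, and the chore name "DemoChore" and the 1000 ms period are made up.

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // Minimal Stoppable so the chore can be cancelled cooperatively.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped = false;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };

    ChoreService choreService = new ChoreService("demo");        // thread-name prefix
    ScheduledChore heartbeat = new ScheduledChore("DemoChore", stopper, 1000) {
      @Override protected void chore() {
        // Runs every 1000 ms until the stopper is stopped or the chore is cancelled.
        System.out.println("DemoChore tick");
      }
    };

    choreService.scheduleChore(heartbeat);  // scheduling is what produces the "is enabled." lines
    Thread.sleep(3_500);
    stopper.stop("done");
    choreService.shutdown();
  }
}
```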
2024-11-17T22:47:42,362 DEBUG [RS:0;1a6e40b21a48:35021 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:47:42,362 DEBUG [RS:0;1a6e40b21a48:35021 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:47:42,362 DEBUG [RS:0;1a6e40b21a48:35021 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:47:42,362 DEBUG [RS:0;1a6e40b21a48:35021 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:47:42,362 DEBUG [RS:0;1a6e40b21a48:35021 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:47:42,362 DEBUG [RS:0;1a6e40b21a48:35021 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1a6e40b21a48:0, corePoolSize=2, maxPoolSize=2 2024-11-17T22:47:42,362 DEBUG [RS:0;1a6e40b21a48:35021 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:47:42,362 DEBUG [RS:0;1a6e40b21a48:35021 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:47:42,362 DEBUG [RS:0;1a6e40b21a48:35021 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:47:42,363 DEBUG [RS:0;1a6e40b21a48:35021 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:47:42,363 DEBUG [RS:0;1a6e40b21a48:35021 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:47:42,363 DEBUG [RS:0;1a6e40b21a48:35021 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:47:42,363 DEBUG [RS:0;1a6e40b21a48:35021 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1a6e40b21a48:0, corePoolSize=3, maxPoolSize=3 2024-11-17T22:47:42,363 DEBUG [RS:0;1a6e40b21a48:35021 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0, corePoolSize=3, maxPoolSize=3 2024-11-17T22:47:42,363 INFO [RS:0;1a6e40b21a48:35021 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:42,363 INFO [RS:0;1a6e40b21a48:35021 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:42,363 INFO [RS:0;1a6e40b21a48:35021 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:42,363 INFO [RS:0;1a6e40b21a48:35021 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
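Annotation: each "Starting executor service name=..., corePoolSize=N, maxPoolSize=N" line above describes a bounded worker pool the region server dedicates to one event type (open region, close region, log replay, snapshot, ...). A plain-JDK sketch of the same core/max pool sizing idea, not HBase's internal executor.ExecutorService; the pool name and sizes are illustrative.

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class PoolSketch {
  public static void main(String[] args) {
    // corePoolSize=1, maxPoolSize=1: one dedicated worker, as for RS_OPEN_META above.
    ThreadPoolExecutor openMetaPool = new ThreadPoolExecutor(
        1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(),
        r -> new Thread(r, "RS_OPEN_META-demo"));

    openMetaPool.submit(() -> System.out.println("handling open-meta event"));
    openMetaPool.shutdown();
  }
}
```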
2024-11-17T22:47:42,364 INFO [RS:0;1a6e40b21a48:35021 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:42,364 INFO [RS:0;1a6e40b21a48:35021 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,35021,1731883661687-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T22:47:42,378 INFO [RS:0;1a6e40b21a48:35021 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-17T22:47:42,379 INFO [RS:0;1a6e40b21a48:35021 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,35021,1731883661687-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:42,379 INFO [RS:0;1a6e40b21a48:35021 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:42,379 INFO [RS:0;1a6e40b21a48:35021 {}] regionserver.Replication(171): 1a6e40b21a48,35021,1731883661687 started 2024-11-17T22:47:42,393 INFO [RS:0;1a6e40b21a48:35021 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:42,394 INFO [RS:0;1a6e40b21a48:35021 {}] regionserver.HRegionServer(1482): Serving as 1a6e40b21a48,35021,1731883661687, RpcServer on 1a6e40b21a48/172.17.0.2:35021, sessionid=0x1004fded5940001 2024-11-17T22:47:42,394 DEBUG [RS:0;1a6e40b21a48:35021 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-17T22:47:42,394 DEBUG [RS:0;1a6e40b21a48:35021 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1a6e40b21a48,35021,1731883661687 2024-11-17T22:47:42,394 DEBUG [RS:0;1a6e40b21a48:35021 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a6e40b21a48,35021,1731883661687' 2024-11-17T22:47:42,394 DEBUG [RS:0;1a6e40b21a48:35021 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-17T22:47:42,395 DEBUG [RS:0;1a6e40b21a48:35021 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-17T22:47:42,395 DEBUG [RS:0;1a6e40b21a48:35021 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-17T22:47:42,395 DEBUG [RS:0;1a6e40b21a48:35021 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-17T22:47:42,395 DEBUG [RS:0;1a6e40b21a48:35021 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1a6e40b21a48,35021,1731883661687 2024-11-17T22:47:42,395 DEBUG [RS:0;1a6e40b21a48:35021 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a6e40b21a48,35021,1731883661687' 2024-11-17T22:47:42,395 DEBUG [RS:0;1a6e40b21a48:35021 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-17T22:47:42,396 DEBUG [RS:0;1a6e40b21a48:35021 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-17T22:47:42,396 DEBUG [RS:0;1a6e40b21a48:35021 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-17T22:47:42,396 INFO [RS:0;1a6e40b21a48:35021 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-17T22:47:42,397 INFO [RS:0;1a6e40b21a48:35021 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-17T22:47:42,499 INFO [RS:0;1a6e40b21a48:35021 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a6e40b21a48%2C35021%2C1731883661687, suffix=, logDir=hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/WALs/1a6e40b21a48,35021,1731883661687, archiveDir=hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/oldWALs, maxLogs=32 2024-11-17T22:47:42,500 WARN [1a6e40b21a48:40143 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-17T22:47:42,501 INFO [RS:0;1a6e40b21a48:35021 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C35021%2C1731883661687.1731883662501 2024-11-17T22:47:42,508 INFO [RS:0;1a6e40b21a48:35021 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/WALs/1a6e40b21a48,35021,1731883661687/1a6e40b21a48%2C35021%2C1731883661687.1731883662501 2024-11-17T22:47:42,509 DEBUG [RS:0;1a6e40b21a48:35021 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39161:39161),(127.0.0.1/127.0.0.1:42013:42013)] 2024-11-17T22:47:42,750 DEBUG [1a6e40b21a48:40143 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-17T22:47:42,752 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1a6e40b21a48,35021,1731883661687 2024-11-17T22:47:42,757 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1a6e40b21a48,35021,1731883661687, state=OPENING 2024-11-17T22:47:42,760 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-17T22:47:42,761 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35021-0x1004fded5940001, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:47:42,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x1004fded5940000, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:47:42,762 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T22:47:42,762 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T22:47:42,762 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T22:47:42,762 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=1a6e40b21a48,35021,1731883661687}] 2024-11-17T22:47:42,917 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-17T22:47:42,922 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48953, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-17T22:47:42,930 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-17T22:47:42,931 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T22:47:42,934 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a6e40b21a48%2C35021%2C1731883661687.meta, suffix=.meta, logDir=hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/WALs/1a6e40b21a48,35021,1731883661687, archiveDir=hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/oldWALs, maxLogs=32 2024-11-17T22:47:42,937 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C35021%2C1731883661687.meta.1731883662937.meta 2024-11-17T22:47:42,943 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/WALs/1a6e40b21a48,35021,1731883661687/1a6e40b21a48%2C35021%2C1731883661687.meta.1731883662937.meta 2024-11-17T22:47:42,944 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42013:42013),(127.0.0.1/127.0.0.1:39161:39161)] 2024-11-17T22:47:42,944 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-17T22:47:42,945 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-17T22:47:42,945 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-17T22:47:42,945 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
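Annotation: the "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" and "Instantiating WALProvider of type class ...FSHLogProvider" lines above are driven by region server configuration. A minimal sketch of tuning two of those knobs; hbase.wal.provider and hbase.regionserver.maxlogs are standard HBase property names, the values are placeholders, and other properties can also influence block and roll sizes depending on the version.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Select the classic FSHLog-based provider seen above ("filesystem");
    // "asyncfs" is the asynchronous alternative.
    conf.set("hbase.wal.provider", "filesystem");

    // Upper bound on un-archived WAL files per region server (the log shows maxLogs=32).
    conf.setInt("hbase.regionserver.maxlogs", 32);

    System.out.println("wal provider = " + conf.get("hbase.wal.provider"));
  }
}
```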
2024-11-17T22:47:42,945 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-17T22:47:42,945 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:47:42,945 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-17T22:47:42,945 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-17T22:47:42,949 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T22:47:42,950 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T22:47:42,950 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:47:42,951 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:47:42,951 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T22:47:42,952 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T22:47:42,952 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:47:42,953 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:47:42,953 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T22:47:42,954 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T22:47:42,954 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:47:42,955 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:47:42,955 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T22:47:42,956 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T22:47:42,956 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:47:42,956 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
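Annotation: the CompactionConfiguration lines above ("files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; ...") report the effective minor-compaction selection policy for each store. A minimal sketch of adjusting those thresholds; hbase.hstore.compaction.min, .max and .ratio are standard HBase properties, and the values set here simply restate the defaults printed in the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Values reported above: minFilesToCompact=3, maxFilesToCompact=10, ratio=1.2.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);

    System.out.println("compaction ratio = "
        + conf.getFloat("hbase.hstore.compaction.ratio", 1.2f));
  }
}
```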
2024-11-17T22:47:42,956 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T22:47:42,957 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/data/hbase/meta/1588230740 2024-11-17T22:47:42,959 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/data/hbase/meta/1588230740 2024-11-17T22:47:42,960 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T22:47:42,960 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T22:47:42,961 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-17T22:47:42,962 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T22:47:42,963 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=689884, jitterRate=-0.12276816368103027}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T22:47:42,963 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-17T22:47:42,964 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731883662946Writing region info on filesystem at 1731883662946Initializing all the Stores at 1731883662947 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883662947Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883662949 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883662949Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883662949Cleaning up temporary data from old regions at 1731883662960 (+11 ms)Running coprocessor post-open hooks at 1731883662963 (+3 ms)Region opened successfully at 1731883662964 (+1 ms) 2024-11-17T22:47:42,965 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731883662916 2024-11-17T22:47:42,968 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-17T22:47:42,968 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-17T22:47:42,969 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=1a6e40b21a48,35021,1731883661687 2024-11-17T22:47:42,970 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1a6e40b21a48,35021,1731883661687, state=OPEN 2024-11-17T22:47:42,972 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35021-0x1004fded5940001, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T22:47:42,972 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x1004fded5940000, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T22:47:42,972 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=1a6e40b21a48,35021,1731883661687 2024-11-17T22:47:42,972 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T22:47:42,972 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T22:47:42,975 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-17T22:47:42,976 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=1a6e40b21a48,35021,1731883661687 in 210 msec 2024-11-17T22:47:42,979 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-17T22:47:42,979 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 629 msec 2024-11-17T22:47:42,980 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T22:47:42,981 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-17T22:47:42,982 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T22:47:42,982 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a6e40b21a48,35021,1731883661687, seqNum=-1] 2024-11-17T22:47:42,983 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T22:47:42,984 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45059, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T22:47:42,992 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 695 msec 2024-11-17T22:47:42,992 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731883662992, completionTime=-1 2024-11-17T22:47:42,992 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-17T22:47:42,993 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-17T22:47:42,995 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-17T22:47:42,995 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731883722995 2024-11-17T22:47:42,995 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731883782995 2024-11-17T22:47:42,995 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-17T22:47:42,995 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,40143,1731883661629-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:42,995 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,40143,1731883661629-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:42,995 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,40143,1731883661629-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:42,995 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-1a6e40b21a48:40143, period=300000, unit=MILLISECONDS is enabled. 
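Annotation: InitMetaProcedure above reports "Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces"; those two are created automatically at bootstrap. A minimal sketch of creating an additional namespace through the public Admin API; the connection setup and the namespace name "demo_ns" are illustrative, not part of this test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class NamespaceSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // 'default' and 'hbase' already exist after InitMetaProcedure; add a user namespace.
      admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println(ns.getName());
      }
    }
  }
}
```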
2024-11-17T22:47:42,995 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:42,996 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:42,998 DEBUG [master/1a6e40b21a48:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-17T22:47:43,000 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.262sec 2024-11-17T22:47:43,001 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-17T22:47:43,001 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-17T22:47:43,001 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-17T22:47:43,001 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-17T22:47:43,001 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-17T22:47:43,001 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,40143,1731883661629-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T22:47:43,001 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,40143,1731883661629-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-17T22:47:43,004 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-17T22:47:43,004 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-17T22:47:43,004 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,40143,1731883661629-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-17T22:47:43,015 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ab35378, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T22:47:43,015 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 1a6e40b21a48,40143,-1 for getting cluster id 2024-11-17T22:47:43,015 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-17T22:47:43,017 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '05f1f4bb-5e6a-4550-b45e-68ea8933d125' 2024-11-17T22:47:43,018 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-17T22:47:43,018 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "05f1f4bb-5e6a-4550-b45e-68ea8933d125" 2024-11-17T22:47:43,019 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b8f8429, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T22:47:43,019 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a6e40b21a48,40143,-1] 2024-11-17T22:47:43,019 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-17T22:47:43,019 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:47:43,021 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34606, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-17T22:47:43,023 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2fa3cb60, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T22:47:43,024 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T22:47:43,025 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a6e40b21a48,35021,1731883661687, seqNum=-1] 2024-11-17T22:47:43,026 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T22:47:43,028 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44814, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T22:47:43,031 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=1a6e40b21a48,40143,1731883661629 2024-11-17T22:47:43,032 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:47:43,036 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-17T22:47:43,036 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-17T22:47:43,036 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-17T22:47:43,037 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T22:47:43,037 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:47:43,037 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:47:43,037 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-17T22:47:43,037 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-17T22:47:43,037 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=398545913, stopped=false 2024-11-17T22:47:43,038 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=1a6e40b21a48,40143,1731883661629 2024-11-17T22:47:43,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x1004fded5940000, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T22:47:43,039 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35021-0x1004fded5940001, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T22:47:43,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x1004fded5940000, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:47:43,039 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T22:47:43,039 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35021-0x1004fded5940001, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:47:43,039 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
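[Editor's note] The two call stacks in this part of the log trace the teardown path: TestLogRolling.testLogRollOnDatanodeDeath finishes, AbstractTestLogRolling.tearDown invokes HBaseTestingUtil.shutdownMiniCluster, which closes the shared async connection and then asks JVMClusterUtil to stop the master and region server. As a rough sketch only — the class and method names are taken from the stack traces above, but the test body shown here is hypothetical and not copied from AbstractTestLogRolling — the JUnit lifecycle implied by those stacks looks like this:

```java
// Sketch of the mini-cluster lifecycle implied by the call stacks above.
// HBaseTestingUtil, startMiniCluster and shutdownMiniCluster appear in the log;
// everything else in this class is illustrative.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Brings up ZooKeeper, HDFS and HBase in-process, producing the
    // "Minicluster is up" line seen earlier in this log.
    testUtil.startMiniCluster();
  }

  @Test
  public void testSomething() throws Exception {
    // Test logic would go here; omitted in this sketch.
  }

  @After
  public void tearDown() throws Exception {
    // Closes the cached connection and stops master + region servers,
    // producing the "Shutting down minicluster" / "Minicluster is down" lines.
    testUtil.shutdownMiniCluster();
  }
}
```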
2024-11-17T22:47:43,039 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T22:47:43,039 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:47:43,039 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35021-0x1004fded5940001, quorum=127.0.0.1:52293, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T22:47:43,040 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '1a6e40b21a48,35021,1731883661687' ***** 2024-11-17T22:47:43,040 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-17T22:47:43,040 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40143-0x1004fded5940000, quorum=127.0.0.1:52293, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T22:47:43,040 INFO [RS:0;1a6e40b21a48:35021 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-17T22:47:43,040 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-17T22:47:43,040 INFO [RS:0;1a6e40b21a48:35021 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-17T22:47:43,040 INFO [RS:0;1a6e40b21a48:35021 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-17T22:47:43,040 INFO [RS:0;1a6e40b21a48:35021 {}] regionserver.HRegionServer(959): stopping server 1a6e40b21a48,35021,1731883661687 2024-11-17T22:47:43,040 INFO [RS:0;1a6e40b21a48:35021 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T22:47:43,040 INFO [RS:0;1a6e40b21a48:35021 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;1a6e40b21a48:35021. 2024-11-17T22:47:43,041 DEBUG [RS:0;1a6e40b21a48:35021 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T22:47:43,041 DEBUG [RS:0;1a6e40b21a48:35021 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:47:43,041 INFO [RS:0;1a6e40b21a48:35021 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-17T22:47:43,041 INFO [RS:0;1a6e40b21a48:35021 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-17T22:47:43,041 INFO [RS:0;1a6e40b21a48:35021 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-17T22:47:43,041 INFO [RS:0;1a6e40b21a48:35021 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-17T22:47:43,041 INFO [RS:0;1a6e40b21a48:35021 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-17T22:47:43,041 DEBUG [RS:0;1a6e40b21a48:35021 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-17T22:47:43,041 DEBUG [RS:0;1a6e40b21a48:35021 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-17T22:47:43,041 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T22:47:43,041 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T22:47:43,041 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T22:47:43,041 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T22:47:43,042 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T22:47:43,042 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-17T22:47:43,059 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/data/hbase/meta/1588230740/.tmp/ns/0ff9afdbd60447a3b7fddb38df8a7ff9 is 43, key is default/ns:d/1731883662985/Put/seqid=0 2024-11-17T22:47:43,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33673 is added to blk_1073741835_1011 (size=5153) 2024-11-17T22:47:43,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39137 is added to blk_1073741835_1011 (size=5153) 2024-11-17T22:47:43,066 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/data/hbase/meta/1588230740/.tmp/ns/0ff9afdbd60447a3b7fddb38df8a7ff9 2024-11-17T22:47:43,074 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/data/hbase/meta/1588230740/.tmp/ns/0ff9afdbd60447a3b7fddb38df8a7ff9 as hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/data/hbase/meta/1588230740/ns/0ff9afdbd60447a3b7fddb38df8a7ff9 2024-11-17T22:47:43,081 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/data/hbase/meta/1588230740/ns/0ff9afdbd60447a3b7fddb38df8a7ff9, entries=2, sequenceid=6, filesize=5.0 K 2024-11-17T22:47:43,083 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 41ms, sequenceid=6, compaction requested=false 2024-11-17T22:47:43,083 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-17T22:47:43,088 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-17T22:47:43,089 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T22:47:43,089 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T22:47:43,089 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731883663041Running coprocessor pre-close hooks at 1731883663041Disabling compacts and flushes for region at 1731883663041Disabling writes for close at 1731883663041Obtaining lock to block concurrent updates at 1731883663042 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731883663042Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731883663042Flushing stores of hbase:meta,,1.1588230740 at 1731883663043 (+1 ms)Flushing 1588230740/ns: creating writer at 1731883663043Flushing 1588230740/ns: appending metadata at 1731883663058 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731883663058Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d675d65: reopening flushed file at 1731883663073 (+15 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 41ms, sequenceid=6, compaction requested=false at 1731883663083 (+10 ms)Writing region close event to WAL at 1731883663084 (+1 ms)Running coprocessor post-close hooks at 1731883663089 (+5 ms)Closed at 1731883663089 2024-11-17T22:47:43,089 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-17T22:47:43,242 INFO [RS:0;1a6e40b21a48:35021 {}] regionserver.HRegionServer(976): stopping server 1a6e40b21a48,35021,1731883661687; all regions closed. 
2024-11-17T22:47:43,243 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:43,243 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:43,243 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:43,244 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:43,244 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:43,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33673 is added to blk_1073741834_1010 (size=1152) 2024-11-17T22:47:43,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39137 is added to blk_1073741834_1010 (size=1152) 2024-11-17T22:47:43,249 DEBUG [RS:0;1a6e40b21a48:35021 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/oldWALs 2024-11-17T22:47:43,249 INFO [RS:0;1a6e40b21a48:35021 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1a6e40b21a48%2C35021%2C1731883661687.meta:.meta(num 1731883662937) 2024-11-17T22:47:43,250 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:43,250 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:43,250 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:43,250 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:43,250 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:43,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39137 is added to blk_1073741833_1009 (size=93) 2024-11-17T22:47:43,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33673 is added to blk_1073741833_1009 (size=93) 2024-11-17T22:47:43,255 DEBUG [RS:0;1a6e40b21a48:35021 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/oldWALs 2024-11-17T22:47:43,255 INFO [RS:0;1a6e40b21a48:35021 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1a6e40b21a48%2C35021%2C1731883661687:(num 1731883662501) 2024-11-17T22:47:43,255 DEBUG [RS:0;1a6e40b21a48:35021 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:47:43,255 INFO [RS:0;1a6e40b21a48:35021 {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T22:47:43,255 INFO [RS:0;1a6e40b21a48:35021 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T22:47:43,255 INFO [RS:0;1a6e40b21a48:35021 {}] hbase.ChoreService(370): Chore service for: regionserver/1a6e40b21a48:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-17T22:47:43,255 INFO [RS:0;1a6e40b21a48:35021 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T22:47:43,255 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-17T22:47:43,255 INFO [RS:0;1a6e40b21a48:35021 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35021 2024-11-17T22:47:43,257 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35021-0x1004fded5940001, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1a6e40b21a48,35021,1731883661687 2024-11-17T22:47:43,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x1004fded5940000, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T22:47:43,257 INFO [RS:0;1a6e40b21a48:35021 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T22:47:43,257 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1a6e40b21a48,35021,1731883661687] 2024-11-17T22:47:43,258 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1a6e40b21a48,35021,1731883661687 already deleted, retry=false 2024-11-17T22:47:43,258 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1a6e40b21a48,35021,1731883661687 expired; onlineServers=0 2024-11-17T22:47:43,258 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '1a6e40b21a48,40143,1731883661629' ***** 2024-11-17T22:47:43,258 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-17T22:47:43,258 INFO [M:0;1a6e40b21a48:40143 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T22:47:43,259 INFO [M:0;1a6e40b21a48:40143 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T22:47:43,259 DEBUG [M:0;1a6e40b21a48:40143 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-17T22:47:43,259 DEBUG [M:0;1a6e40b21a48:40143 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-17T22:47:43,259 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-17T22:47:43,259 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.small.0-1731883662301 {}] cleaner.HFileCleaner(306): Exit Thread[master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.small.0-1731883662301,5,FailOnTimeoutGroup] 2024-11-17T22:47:43,259 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.large.0-1731883662301 {}] cleaner.HFileCleaner(306): Exit Thread[master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.large.0-1731883662301,5,FailOnTimeoutGroup] 2024-11-17T22:47:43,259 INFO [M:0;1a6e40b21a48:40143 {}] hbase.ChoreService(370): Chore service for: master/1a6e40b21a48:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-17T22:47:43,259 INFO [M:0;1a6e40b21a48:40143 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T22:47:43,259 DEBUG [M:0;1a6e40b21a48:40143 {}] master.HMaster(1795): Stopping service threads 2024-11-17T22:47:43,259 INFO [M:0;1a6e40b21a48:40143 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-17T22:47:43,259 INFO [M:0;1a6e40b21a48:40143 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T22:47:43,259 INFO [M:0;1a6e40b21a48:40143 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-17T22:47:43,260 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-17T22:47:43,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x1004fded5940000, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-17T22:47:43,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x1004fded5940000, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:47:43,260 DEBUG [M:0;1a6e40b21a48:40143 {}] zookeeper.ZKUtil(347): master:40143-0x1004fded5940000, quorum=127.0.0.1:52293, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-17T22:47:43,260 WARN [M:0;1a6e40b21a48:40143 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-17T22:47:43,261 INFO [M:0;1a6e40b21a48:40143 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/.lastflushedseqids 2024-11-17T22:47:43,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39137 is added to blk_1073741836_1012 (size=99) 2024-11-17T22:47:43,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33673 is added to blk_1073741836_1012 (size=99) 2024-11-17T22:47:43,267 INFO [M:0;1a6e40b21a48:40143 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-17T22:47:43,267 INFO [M:0;1a6e40b21a48:40143 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-17T22:47:43,268 DEBUG [M:0;1a6e40b21a48:40143 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T22:47:43,268 INFO [M:0;1a6e40b21a48:40143 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:47:43,268 DEBUG [M:0;1a6e40b21a48:40143 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:47:43,268 DEBUG [M:0;1a6e40b21a48:40143 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T22:47:43,268 DEBUG [M:0;1a6e40b21a48:40143 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:47:43,268 INFO [M:0;1a6e40b21a48:40143 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-17T22:47:43,290 DEBUG [M:0;1a6e40b21a48:40143 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e7bc9ea2c8f54310be96d6e929598a85 is 82, key is hbase:meta,,1/info:regioninfo/1731883662969/Put/seqid=0 2024-11-17T22:47:43,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33673 is added to blk_1073741837_1013 (size=5672) 2024-11-17T22:47:43,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39137 is added to blk_1073741837_1013 (size=5672) 2024-11-17T22:47:43,296 INFO [M:0;1a6e40b21a48:40143 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e7bc9ea2c8f54310be96d6e929598a85 2024-11-17T22:47:43,317 DEBUG [M:0;1a6e40b21a48:40143 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a318b36456fd4b3aa9c336a8def108bc is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731883662991/Put/seqid=0 2024-11-17T22:47:43,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39137 is added to blk_1073741838_1014 (size=5275) 2024-11-17T22:47:43,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33673 is added to blk_1073741838_1014 (size=5275) 2024-11-17T22:47:43,323 INFO [M:0;1a6e40b21a48:40143 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a318b36456fd4b3aa9c336a8def108bc 2024-11-17T22:47:43,344 DEBUG [M:0;1a6e40b21a48:40143 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/863318dc85114a2389eee36099f1ff8c is 69, key is 1a6e40b21a48,35021,1731883661687/rs:state/1731883662341/Put/seqid=0 2024-11-17T22:47:43,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33673 is added to blk_1073741839_1015 (size=5156) 2024-11-17T22:47:43,350 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39137 is added to blk_1073741839_1015 (size=5156) 2024-11-17T22:47:43,350 INFO [M:0;1a6e40b21a48:40143 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/863318dc85114a2389eee36099f1ff8c 2024-11-17T22:47:43,358 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35021-0x1004fded5940001, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T22:47:43,358 INFO [RS:0;1a6e40b21a48:35021 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T22:47:43,358 INFO [RS:0;1a6e40b21a48:35021 {}] regionserver.HRegionServer(1031): Exiting; stopping=1a6e40b21a48,35021,1731883661687; zookeeper connection closed. 2024-11-17T22:47:43,358 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35021-0x1004fded5940001, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T22:47:43,358 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1401998e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1401998e 2024-11-17T22:47:43,358 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-17T22:47:43,373 DEBUG [M:0;1a6e40b21a48:40143 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c93c994ebe1c41a4bfb53969db9fb36b is 52, key is load_balancer_on/state:d/1731883663035/Put/seqid=0 2024-11-17T22:47:43,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39137 is added to blk_1073741840_1016 (size=5056) 2024-11-17T22:47:43,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33673 is added to blk_1073741840_1016 (size=5056) 2024-11-17T22:47:43,379 INFO [M:0;1a6e40b21a48:40143 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c93c994ebe1c41a4bfb53969db9fb36b 2024-11-17T22:47:43,387 DEBUG [M:0;1a6e40b21a48:40143 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e7bc9ea2c8f54310be96d6e929598a85 as hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e7bc9ea2c8f54310be96d6e929598a85 2024-11-17T22:47:43,393 INFO [M:0;1a6e40b21a48:40143 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e7bc9ea2c8f54310be96d6e929598a85, entries=8, sequenceid=29, filesize=5.5 K 2024-11-17T22:47:43,394 DEBUG [M:0;1a6e40b21a48:40143 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a318b36456fd4b3aa9c336a8def108bc as hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a318b36456fd4b3aa9c336a8def108bc 2024-11-17T22:47:43,401 INFO [M:0;1a6e40b21a48:40143 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a318b36456fd4b3aa9c336a8def108bc, entries=3, sequenceid=29, filesize=5.2 K 2024-11-17T22:47:43,402 DEBUG [M:0;1a6e40b21a48:40143 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/863318dc85114a2389eee36099f1ff8c as hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/863318dc85114a2389eee36099f1ff8c 2024-11-17T22:47:43,408 INFO [M:0;1a6e40b21a48:40143 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/863318dc85114a2389eee36099f1ff8c, entries=1, sequenceid=29, filesize=5.0 K 2024-11-17T22:47:43,409 DEBUG [M:0;1a6e40b21a48:40143 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c93c994ebe1c41a4bfb53969db9fb36b as hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c93c994ebe1c41a4bfb53969db9fb36b 2024-11-17T22:47:43,415 INFO [M:0;1a6e40b21a48:40143 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44627/user/jenkins/test-data/81072aeb-7357-1f7b-574e-933f7127a010/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c93c994ebe1c41a4bfb53969db9fb36b, entries=1, sequenceid=29, filesize=4.9 K 2024-11-17T22:47:43,417 INFO [M:0;1a6e40b21a48:40143 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 149ms, sequenceid=29, compaction requested=false 2024-11-17T22:47:43,418 INFO [M:0;1a6e40b21a48:40143 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:47:43,419 DEBUG [M:0;1a6e40b21a48:40143 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731883663268Disabling compacts and flushes for region at 1731883663268Disabling writes for close at 1731883663268Obtaining lock to block concurrent updates at 1731883663268Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731883663268Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731883663269 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731883663269Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731883663269Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731883663290 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731883663290Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731883663302 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731883663316 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731883663316Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731883663328 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731883663343 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731883663344 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731883663357 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731883663372 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731883663372Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@41d6f931: reopening flushed file at 1731883663386 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@247888c8: reopening flushed file at 1731883663393 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@79b3a41e: reopening flushed file at 1731883663401 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3dfb6ed0: reopening flushed file at 1731883663408 (+7 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 149ms, sequenceid=29, compaction requested=false at 1731883663417 (+9 ms)Writing region close event to WAL at 1731883663418 (+1 ms)Closed at 1731883663418 2024-11-17T22:47:43,419 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:43,419 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:43,419 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:43,419 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:43,419 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:43,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39137 is added to blk_1073741830_1006 (size=10311) 2024-11-17T22:47:43,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33673 is added to blk_1073741830_1006 (size=10311) 2024-11-17T22:47:43,422 INFO [M:0;1a6e40b21a48:40143 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-17T22:47:43,422 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-17T22:47:43,423 INFO [M:0;1a6e40b21a48:40143 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40143 2024-11-17T22:47:43,423 INFO [M:0;1a6e40b21a48:40143 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T22:47:43,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x1004fded5940000, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T22:47:43,524 INFO [M:0;1a6e40b21a48:40143 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T22:47:43,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x1004fded5940000, quorum=127.0.0.1:52293, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T22:47:43,528 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7cebd4b6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:47:43,529 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1e6ff2f3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T22:47:43,529 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T22:47:43,529 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18f27499{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T22:47:43,530 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d944f53{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/hadoop.log.dir/,STOPPED} 2024-11-17T22:47:43,532 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T22:47:43,532 WARN [BP-1935376058-172.17.0.2-1731883661003 heartbeating to localhost/127.0.0.1:44627 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T22:47:43,532 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T22:47:43,532 WARN [BP-1935376058-172.17.0.2-1731883661003 heartbeating to localhost/127.0.0.1:44627 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1935376058-172.17.0.2-1731883661003 (Datanode Uuid d8807b53-c8aa-48d0-8017-083a7178aa2c) service to localhost/127.0.0.1:44627 2024-11-17T22:47:43,533 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/cluster_60482763-82b9-7eff-dac3-45c1d226e09b/data/data3/current/BP-1935376058-172.17.0.2-1731883661003 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:47:43,534 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/cluster_60482763-82b9-7eff-dac3-45c1d226e09b/data/data4/current/BP-1935376058-172.17.0.2-1731883661003 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:47:43,534 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T22:47:43,537 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@61e52b83{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:47:43,537 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@29a18ee0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T22:47:43,537 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T22:47:43,537 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@670e4080{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T22:47:43,537 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@607b9bc6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/hadoop.log.dir/,STOPPED} 2024-11-17T22:47:43,539 WARN [BP-1935376058-172.17.0.2-1731883661003 heartbeating to localhost/127.0.0.1:44627 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T22:47:43,539 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
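[Editor's note] Once the datanodes and Jetty handlers of the first cluster are stopped, the test immediately brings up a second minicluster; the "Starting up minicluster with option: StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1, ...}" entry appears just below. As a hedged illustration of how such an option object is typically assembled, the sketch below uses the HBaseTestingUtility/StartMiniClusterOption names from released HBase versions; the 4.0.0-alpha snapshot in this log uses the renamed HBaseTestingUtil, so treat the exact class names as approximate rather than as this test's actual code:

```java
// Illustrative sketch of the options echoed in the
// "Starting up minicluster with option: StartMiniClusterOption{...}" entry below.
// Field names mirror the toString printed in the log.
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class RestartMiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();

    // One master, one region server, two datanodes, one ZK server,
    // no pre-created root dir or WAL dir -- matching the logged options.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .createRootDir(false)
        .createWALDir(false)
        .build();

    util.startMiniCluster(option);
    try {
      // Exercise the cluster via util.getConnection() here.
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
```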
2024-11-17T22:47:43,539 WARN [BP-1935376058-172.17.0.2-1731883661003 heartbeating to localhost/127.0.0.1:44627 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1935376058-172.17.0.2-1731883661003 (Datanode Uuid 45a4c0b5-8a22-4d6a-a187-5a3805f7db6f) service to localhost/127.0.0.1:44627 2024-11-17T22:47:43,539 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T22:47:43,540 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/cluster_60482763-82b9-7eff-dac3-45c1d226e09b/data/data1/current/BP-1935376058-172.17.0.2-1731883661003 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:47:43,540 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/cluster_60482763-82b9-7eff-dac3-45c1d226e09b/data/data2/current/BP-1935376058-172.17.0.2-1731883661003 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:47:43,540 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T22:47:43,546 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3235d5ba{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T22:47:43,546 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@347af0d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T22:47:43,546 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T22:47:43,547 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a69944b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T22:47:43,547 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a3c3ceb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/hadoop.log.dir/,STOPPED} 2024-11-17T22:47:43,552 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-17T22:47:43,570 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-17T22:47:43,570 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-17T22:47:43,570 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/hadoop.log.dir so I do NOT create it in target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf 2024-11-17T22:47:43,570 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a40d76ba-cb42-9984-ea2a-c63683e5824b/hadoop.tmp.dir so I do NOT create it in target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf 2024-11-17T22:47:43,570 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc, deleteOnExit=true 2024-11-17T22:47:43,570 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-17T22:47:43,570 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/test.cache.data in system properties and HBase conf 2024-11-17T22:47:43,570 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/hadoop.tmp.dir in system properties and HBase conf 2024-11-17T22:47:43,571 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/hadoop.log.dir in system properties and HBase conf 2024-11-17T22:47:43,571 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-17T22:47:43,571 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-17T22:47:43,571 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-17T22:47:43,571 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-17T22:47:43,571 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-17T22:47:43,571 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-17T22:47:43,571 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-17T22:47:43,571 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T22:47:43,571 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-17T22:47:43,571 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-17T22:47:43,572 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T22:47:43,572 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T22:47:43,572 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-17T22:47:43,572 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/nfs.dump.dir in system properties and HBase conf 2024-11-17T22:47:43,572 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/java.io.tmpdir in system properties and HBase conf 2024-11-17T22:47:43,572 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T22:47:43,572 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-17T22:47:43,572 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-17T22:47:43,584 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T22:47:43,633 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:47:43,639 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T22:47:43,640 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T22:47:43,640 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T22:47:43,640 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T22:47:43,641 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:47:43,642 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@42b52d44{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/hadoop.log.dir/,AVAILABLE} 2024-11-17T22:47:43,642 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2305029e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T22:47:43,734 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@62b96b7c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/java.io.tmpdir/jetty-localhost-43985-hadoop-hdfs-3_4_1-tests_jar-_-any-17782681913120623554/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T22:47:43,735 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2fd186ec{HTTP/1.1, (http/1.1)}{localhost:43985} 2024-11-17T22:47:43,735 INFO [Time-limited test {}] server.Server(415): Started @104759ms 2024-11-17T22:47:43,747 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T22:47:43,797 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:47:43,800 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T22:47:43,801 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T22:47:43,801 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T22:47:43,801 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T22:47:43,802 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5a107105{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/hadoop.log.dir/,AVAILABLE} 2024-11-17T22:47:43,802 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@48743db4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T22:47:43,895 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@14d09ab9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/java.io.tmpdir/jetty-localhost-37553-hadoop-hdfs-3_4_1-tests_jar-_-any-12585578936294743219/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:47:43,895 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@ee6b493{HTTP/1.1, (http/1.1)}{localhost:37553} 2024-11-17T22:47:43,895 INFO [Time-limited test {}] server.Server(415): Started @104920ms 2024-11-17T22:47:43,897 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T22:47:43,924 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:47:43,928 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T22:47:43,931 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T22:47:43,931 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T22:47:43,931 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T22:47:43,931 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7eee535{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/hadoop.log.dir/,AVAILABLE} 2024-11-17T22:47:43,932 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ec1a06e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T22:47:43,964 WARN [Thread-659 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data1/current/BP-567024958-172.17.0.2-1731883663595/current, will proceed with Du for space computation calculation, 2024-11-17T22:47:43,964 WARN [Thread-660 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data2/current/BP-567024958-172.17.0.2-1731883663595/current, will proceed with Du for space computation calculation, 2024-11-17T22:47:43,986 WARN [Thread-638 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T22:47:43,989 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb48bddb3697cf25f with lease ID 0x94736aff417bc16e: Processing first storage report for DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9 from datanode DatanodeRegistration(127.0.0.1:34923, datanodeUuid=e344c866-29df-404c-8553-a89f30d18e1f, infoPort=43281, infoSecurePort=0, ipcPort=41437, storageInfo=lv=-57;cid=testClusterID;nsid=889579875;c=1731883663595) 2024-11-17T22:47:43,989 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb48bddb3697cf25f with lease ID 0x94736aff417bc16e: from storage DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9 node DatanodeRegistration(127.0.0.1:34923, datanodeUuid=e344c866-29df-404c-8553-a89f30d18e1f, infoPort=43281, infoSecurePort=0, ipcPort=41437, storageInfo=lv=-57;cid=testClusterID;nsid=889579875;c=1731883663595), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:47:43,989 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb48bddb3697cf25f with lease ID 0x94736aff417bc16e: Processing first storage report for DS-47ee990a-5e86-4d72-a8d1-9ade42089b03 from datanode DatanodeRegistration(127.0.0.1:34923, datanodeUuid=e344c866-29df-404c-8553-a89f30d18e1f, infoPort=43281, infoSecurePort=0, ipcPort=41437, storageInfo=lv=-57;cid=testClusterID;nsid=889579875;c=1731883663595) 2024-11-17T22:47:43,989 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb48bddb3697cf25f with lease ID 0x94736aff417bc16e: from storage DS-47ee990a-5e86-4d72-a8d1-9ade42089b03 node DatanodeRegistration(127.0.0.1:34923, datanodeUuid=e344c866-29df-404c-8553-a89f30d18e1f, infoPort=43281, infoSecurePort=0, ipcPort=41437, storageInfo=lv=-57;cid=testClusterID;nsid=889579875;c=1731883663595), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:47:44,036 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7330fb3f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/java.io.tmpdir/jetty-localhost-42429-hadoop-hdfs-3_4_1-tests_jar-_-any-481978155086186718/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:47:44,036 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@20aa2ea7{HTTP/1.1, (http/1.1)}{localhost:42429} 2024-11-17T22:47:44,036 INFO [Time-limited test {}] server.Server(415): Started @105061ms 2024-11-17T22:47:44,038 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
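The entries above cover the tear-down of one minicluster and the start of a fresh one: a new ZooKeeper quorum, a two-DataNode MiniDFS, and the HBase master and region server, driven by the StartMiniClusterOption printed near the top of this section. A minimal sketch of the test-side calls that produce this sequence, assuming the HBaseTestingUtil / StartMiniClusterOption API named in the log (method names can differ between HBase branches):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();

        // Mirrors StartMiniClusterOption{numMasters=1, numRegionServers=1,
        // numDataNodes=2, numZkServers=1} from the log above.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();

        util.startMiniCluster(option);   // starts MiniZK, MiniDFS and the HBase daemons
        try {
          // ... run test logic against the minicluster ...
        } finally {
          util.shutdownMiniCluster();    // produces the "Minicluster is down" entry
        }
      }
    }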
2024-11-17T22:47:44,100 WARN [Thread-685 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data3/current/BP-567024958-172.17.0.2-1731883663595/current, will proceed with Du for space computation calculation, 2024-11-17T22:47:44,100 WARN [Thread-686 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data4/current/BP-567024958-172.17.0.2-1731883663595/current, will proceed with Du for space computation calculation, 2024-11-17T22:47:44,121 WARN [Thread-674 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-17T22:47:44,123 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5ad91c7b7c8f8583 with lease ID 0x94736aff417bc16f: Processing first storage report for DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b from datanode DatanodeRegistration(127.0.0.1:42011, datanodeUuid=49dcf81b-409b-44ba-a903-7dd5ed0cf385, infoPort=42065, infoSecurePort=0, ipcPort=44315, storageInfo=lv=-57;cid=testClusterID;nsid=889579875;c=1731883663595) 2024-11-17T22:47:44,123 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5ad91c7b7c8f8583 with lease ID 0x94736aff417bc16f: from storage DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b node DatanodeRegistration(127.0.0.1:42011, datanodeUuid=49dcf81b-409b-44ba-a903-7dd5ed0cf385, infoPort=42065, infoSecurePort=0, ipcPort=44315, storageInfo=lv=-57;cid=testClusterID;nsid=889579875;c=1731883663595), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:47:44,124 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5ad91c7b7c8f8583 with lease ID 0x94736aff417bc16f: Processing first storage report for DS-a55a51f4-5881-4ae7-9da5-8fe38c57ec85 from datanode DatanodeRegistration(127.0.0.1:42011, datanodeUuid=49dcf81b-409b-44ba-a903-7dd5ed0cf385, infoPort=42065, infoSecurePort=0, ipcPort=44315, storageInfo=lv=-57;cid=testClusterID;nsid=889579875;c=1731883663595) 2024-11-17T22:47:44,124 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5ad91c7b7c8f8583 with lease ID 0x94736aff417bc16f: from storage DS-a55a51f4-5881-4ae7-9da5-8fe38c57ec85 node DatanodeRegistration(127.0.0.1:42011, datanodeUuid=49dcf81b-409b-44ba-a903-7dd5ed0cf385, infoPort=42065, infoSecurePort=0, ipcPort=44315, storageInfo=lv=-57;cid=testClusterID;nsid=889579875;c=1731883663595), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:47:44,165 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf 2024-11-17T22:47:44,167 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/zookeeper_0, clientPort=51766, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-17T22:47:44,168 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51766 2024-11-17T22:47:44,168 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:47:44,170 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:47:44,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34923 is added to blk_1073741825_1001 (size=7) 2024-11-17T22:47:44,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42011 is added to blk_1073741825_1001 (size=7) 2024-11-17T22:47:44,181 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048 with version=8 2024-11-17T22:47:44,181 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/hbase-staging 2024-11-17T22:47:44,184 INFO [Time-limited test {}] client.ConnectionUtils(128): master/1a6e40b21a48:0 server-side Connection retries=45 2024-11-17T22:47:44,184 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T22:47:44,184 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T22:47:44,184 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T22:47:44,184 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T22:47:44,184 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T22:47:44,184 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-17T22:47:44,184 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T22:47:44,185 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44599 2024-11-17T22:47:44,187 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44599 connecting to ZooKeeper ensemble=127.0.0.1:51766 2024-11-17T22:47:44,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:445990x0, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T22:47:44,191 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44599-0x1004fdedf950000 connected 2024-11-17T22:47:44,206 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:47:44,209 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:47:44,212 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44599-0x1004fdedf950000, quorum=127.0.0.1:51766, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T22:47:44,212 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048, hbase.cluster.distributed=false 2024-11-17T22:47:44,214 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44599-0x1004fdedf950000, quorum=127.0.0.1:51766, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T22:47:44,215 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44599 2024-11-17T22:47:44,215 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44599 2024-11-17T22:47:44,215 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44599 2024-11-17T22:47:44,216 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44599 2024-11-17T22:47:44,216 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44599 2024-11-17T22:47:44,234 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1a6e40b21a48:0 server-side Connection retries=45 2024-11-17T22:47:44,235 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T22:47:44,235 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T22:47:44,235 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T22:47:44,235 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T22:47:44,235 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T22:47:44,235 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-17T22:47:44,235 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T22:47:44,236 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45593 2024-11-17T22:47:44,237 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45593 connecting to ZooKeeper ensemble=127.0.0.1:51766 2024-11-17T22:47:44,238 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:47:44,240 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:47:44,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:455930x0, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T22:47:44,244 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45593-0x1004fdedf950001 connected 2024-11-17T22:47:44,244 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45593-0x1004fdedf950001, quorum=127.0.0.1:51766, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T22:47:44,245 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-17T22:47:44,248 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-17T22:47:44,249 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45593-0x1004fdedf950001, quorum=127.0.0.1:51766, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-17T22:47:44,250 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45593-0x1004fdedf950001, quorum=127.0.0.1:51766, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T22:47:44,251 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45593 2024-11-17T22:47:44,251 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45593 2024-11-17T22:47:44,252 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45593 2024-11-17T22:47:44,253 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45593 2024-11-17T22:47:44,253 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45593 
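The RpcExecutor entries above describe each call queue as a bounded FIFO queue drained by a fixed number of handler threads (for example default.FPBQ.Fifo: numCallQueues=1, maxQueueLength=30, handlerCount=3, backed by java.util.concurrent.LinkedBlockingQueue). A simplified, self-contained illustration of that queue-plus-handlers layout, using only java.util.concurrent rather than HBase's actual RpcExecutor classes:

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    public class FifoCallQueueSketch {
      public static void main(String[] args) throws InterruptedException {
        // One bounded FIFO call queue, as in "numCallQueues=1, maxQueueLength=30".
        BlockingQueue<Runnable> callQueue = new LinkedBlockingQueue<>(30);

        // Three handler threads draining the queue, as in "handlerCount=3".
        for (int i = 0; i < 3; i++) {
          Thread handler = new Thread(() -> {
            try {
              while (!Thread.currentThread().isInterrupted()) {
                callQueue.take().run();           // block until a "call" arrives
              }
            } catch (InterruptedException e) {
              Thread.currentThread().interrupt(); // shut down quietly
            }
          }, "default.FPBQ.Fifo.handler-" + i);
          handler.setDaemon(true);
          handler.start();
        }

        // offer() fails fast when the queue is full, which is how a bounded
        // call queue applies backpressure to callers.
        boolean accepted = callQueue.offer(() -> System.out.println("handled one call"));
        System.out.println("call accepted: " + accepted);
        Thread.sleep(200);   // give a handler a moment to drain the queue before exit
      }
    }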
2024-11-17T22:47:44,264 DEBUG [M:0;1a6e40b21a48:44599 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;1a6e40b21a48:44599 2024-11-17T22:47:44,265 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/1a6e40b21a48,44599,1731883664183 2024-11-17T22:47:44,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45593-0x1004fdedf950001, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T22:47:44,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44599-0x1004fdedf950000, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T22:47:44,267 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44599-0x1004fdedf950000, quorum=127.0.0.1:51766, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/1a6e40b21a48,44599,1731883664183 2024-11-17T22:47:44,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45593-0x1004fdedf950001, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-17T22:47:44,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44599-0x1004fdedf950000, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:47:44,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45593-0x1004fdedf950001, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:47:44,268 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44599-0x1004fdedf950000, quorum=127.0.0.1:51766, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-17T22:47:44,269 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/1a6e40b21a48,44599,1731883664183 from backup master directory 2024-11-17T22:47:44,269 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44599-0x1004fdedf950000, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/1a6e40b21a48,44599,1731883664183 2024-11-17T22:47:44,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45593-0x1004fdedf950001, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T22:47:44,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44599-0x1004fdedf950000, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T22:47:44,270 WARN [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
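The ZKUtil/ZKWatcher entries above repeatedly "set watcher on znode that does not yet exist" (for example /hbase/running and /hbase/master) and then react to NodeCreated and NodeChildrenChanged events. In plain ZooKeeper client terms that corresponds to an exists() call with a watcher, which registers the watch whether or not the node is present yet. A minimal sketch with the stock org.apache.zookeeper client; the connect string and znode path are taken from the log, the rest is illustrative:

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);

        Watcher watcher = (WatchedEvent event) -> {
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected
              && event.getType() == Watcher.Event.EventType.None) {
            connected.countDown();   // the "SyncConnected, path=null" event in the log
          } else {
            System.out.println("event " + event.getType() + " on " + event.getPath());
          }
        };

        ZooKeeper zk = new ZooKeeper("127.0.0.1:51766", 30000, watcher);
        connected.await();

        // Registers a watch even though /hbase/running may not exist yet;
        // a later create fires a NodeCreated event on the watcher above.
        zk.exists("/hbase/running", watcher);

        Thread.sleep(10_000);        // keep the session alive long enough to see events
        zk.close();
      }
    }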
2024-11-17T22:47:44,270 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=1a6e40b21a48,44599,1731883664183 2024-11-17T22:47:44,274 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/hbase.id] with ID: c03b3dfd-a67c-435f-ae80-b49811de8429 2024-11-17T22:47:44,274 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/.tmp/hbase.id 2024-11-17T22:47:44,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42011 is added to blk_1073741826_1002 (size=42) 2024-11-17T22:47:44,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34923 is added to blk_1073741826_1002 (size=42) 2024-11-17T22:47:44,283 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/.tmp/hbase.id]:[hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/hbase.id] 2024-11-17T22:47:44,298 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:47:44,298 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-17T22:47:44,299 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
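The FSUtils entries above show the cluster ID (c03b3dfd-a67c-435f-ae80-b49811de8429) being written to a temporary location under .tmp and then moved to its final hbase.id path, so readers never observe a half-written file. A small sketch of that write-then-rename pattern with the stock Hadoop FileSystem API; the paths are illustrative, not the exact ones HBase manages internally:

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdFileSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:39901");   // NameNode port from the log

        FileSystem fs = FileSystem.get(conf);
        Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id");  // hypothetical paths
        Path dst = new Path("/user/jenkins/test-data/hbase.id");

        // 1. Write the full content to the temporary file first.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("c03b3dfd-a67c-435f-ae80-b49811de8429".getBytes(StandardCharsets.UTF_8));
        }

        // 2. Rename into place; readers then see either no file or the
        //    complete file, never a partial write.
        if (!fs.rename(tmp, dst)) {
          throw new IOException("rename " + tmp + " -> " + dst + " failed");
        }
      }
    }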
2024-11-17T22:47:44,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44599-0x1004fdedf950000, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:47:44,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45593-0x1004fdedf950001, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:47:44,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42011 is added to blk_1073741827_1003 (size=196) 2024-11-17T22:47:44,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34923 is added to blk_1073741827_1003 (size=196) 2024-11-17T22:47:44,313 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T22:47:44,313 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-17T22:47:44,314 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T22:47:44,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42011 is added to blk_1073741828_1004 (size=1189) 2024-11-17T22:47:44,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34923 is added to blk_1073741828_1004 (size=1189) 2024-11-17T22:47:44,323 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/data/master/store 2024-11-17T22:47:44,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34923 is added to blk_1073741829_1005 (size=34) 2024-11-17T22:47:44,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42011 is added to blk_1073741829_1005 (size=34) 2024-11-17T22:47:44,331 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:47:44,331 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T22:47:44,331 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:47:44,331 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:47:44,331 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T22:47:44,331 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:47:44,331 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
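The MasterRegion entries above spell out the 'master:store' table layout: an in-memory 'info' family with three versions, ROW_INDEX_V1 block encoding, a ROWCOL bloom filter and an 8 KB block size, plus single-version 'proc', 'rs' and 'state' families. A hedged sketch of how such a descriptor is expressed with the public HBase client builder API; this is illustrative only, the master assembles its internal descriptor elsewhere:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static void main(String[] args) {
        TableDescriptor store = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            // 'info': VERSIONS=3, IN_MEMORY=true, ROW_INDEX_V1, ROWCOL bloom, 8 KB blocks
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setBlocksize(8 * 1024)
                .build())
            // 'proc', 'rs', 'state': single version, default encoding/bloom/block size
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1).build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("rs"))
                .setMaxVersions(1).build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("state"))
                .setMaxVersions(1).build())
            .build();

        System.out.println(store);
      }
    }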
2024-11-17T22:47:44,331 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731883664331Disabling compacts and flushes for region at 1731883664331Disabling writes for close at 1731883664331Writing region close event to WAL at 1731883664331Closed at 1731883664331 2024-11-17T22:47:44,332 WARN [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/data/master/store/.initializing 2024-11-17T22:47:44,332 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183 2024-11-17T22:47:44,335 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a6e40b21a48%2C44599%2C1731883664183, suffix=, logDir=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183, archiveDir=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/oldWALs, maxLogs=10 2024-11-17T22:47:44,335 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C44599%2C1731883664183.1731883664335 2024-11-17T22:47:44,341 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 2024-11-17T22:47:44,344 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42065:42065),(127.0.0.1/127.0.0.1:43281:43281)] 2024-11-17T22:47:44,349 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-17T22:47:44,349 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:47:44,350 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:47:44,350 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:47:44,351 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:47:44,353 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-17T22:47:44,353 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:47:44,354 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:47:44,354 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:47:44,355 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-17T22:47:44,355 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:47:44,356 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T22:47:44,356 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:47:44,358 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-17T22:47:44,358 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:47:44,359 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T22:47:44,359 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:47:44,360 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-17T22:47:44,360 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:47:44,361 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T22:47:44,361 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:47:44,362 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:47:44,362 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:47:44,364 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:47:44,364 INFO [regionserver/1a6e40b21a48:0.leaseChecker 
{}] regionserver.LeaseManager(133): Closed leases 2024-11-17T22:47:44,364 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:47:44,365 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-17T22:47:44,366 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:47:44,369 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T22:47:44,370 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=783222, jitterRate=-0.004082590341567993}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-17T22:47:44,371 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731883664350Initializing all the Stores at 1731883664351 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883664351Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883664351Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883664351Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883664351Cleaning up temporary data from old regions at 1731883664364 (+13 ms)Region opened successfully at 1731883664371 (+7 ms) 2024-11-17T22:47:44,371 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-17T22:47:44,376 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41f2bdfa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a6e40b21a48/172.17.0.2:0 2024-11-17T22:47:44,377 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-17T22:47:44,377 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-17T22:47:44,377 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-17T22:47:44,377 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-17T22:47:44,378 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-17T22:47:44,378 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-17T22:47:44,378 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-17T22:47:44,383 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-17T22:47:44,384 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44599-0x1004fdedf950000, quorum=127.0.0.1:51766, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-17T22:47:44,385 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-17T22:47:44,385 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-17T22:47:44,386 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44599-0x1004fdedf950000, quorum=127.0.0.1:51766, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-17T22:47:44,387 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-17T22:47:44,387 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-17T22:47:44,388 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44599-0x1004fdedf950000, quorum=127.0.0.1:51766, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-17T22:47:44,389 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-17T22:47:44,390 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44599-0x1004fdedf950000, quorum=127.0.0.1:51766, baseZNode=/hbase Unable to get 
data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-17T22:47:44,391 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-17T22:47:44,393 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44599-0x1004fdedf950000, quorum=127.0.0.1:51766, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-17T22:47:44,394 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-17T22:47:44,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45593-0x1004fdedf950001, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T22:47:44,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44599-0x1004fdedf950000, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T22:47:44,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45593-0x1004fdedf950001, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:47:44,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44599-0x1004fdedf950000, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:47:44,395 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=1a6e40b21a48,44599,1731883664183, sessionid=0x1004fdedf950000, setting cluster-up flag (Was=false) 2024-11-17T22:47:44,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45593-0x1004fdedf950001, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:47:44,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44599-0x1004fdedf950000, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:47:44,400 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-17T22:47:44,401 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1a6e40b21a48,44599,1731883664183 2024-11-17T22:47:44,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45593-0x1004fdedf950001, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:47:44,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44599-0x1004fdedf950000, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:47:44,407 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, 
/hbase/online-snapshot/abort 2024-11-17T22:47:44,408 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1a6e40b21a48,44599,1731883664183 2024-11-17T22:47:44,410 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-17T22:47:44,412 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-17T22:47:44,412 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-17T22:47:44,412 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-17T22:47:44,412 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 1a6e40b21a48,44599,1731883664183 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-17T22:47:44,414 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/1a6e40b21a48:0, corePoolSize=5, maxPoolSize=5 2024-11-17T22:47:44,414 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/1a6e40b21a48:0, corePoolSize=5, maxPoolSize=5 2024-11-17T22:47:44,414 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=5, maxPoolSize=5 2024-11-17T22:47:44,414 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=5, maxPoolSize=5 2024-11-17T22:47:44,414 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/1a6e40b21a48:0, corePoolSize=10, maxPoolSize=10 2024-11-17T22:47:44,414 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:47:44,414 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=2, maxPoolSize=2 2024-11-17T22:47:44,414 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] 
executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:47:44,415 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731883694415 2024-11-17T22:47:44,415 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-17T22:47:44,415 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-17T22:47:44,415 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-17T22:47:44,415 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-17T22:47:44,416 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-17T22:47:44,416 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-17T22:47:44,416 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:44,416 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-17T22:47:44,416 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-17T22:47:44,416 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-17T22:47:44,417 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-17T22:47:44,417 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-17T22:47:44,417 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T22:47:44,417 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-17T22:47:44,417 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.large.0-1731883664417,5,FailOnTimeoutGroup] 2024-11-17T22:47:44,418 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:47:44,418 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 
'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-17T22:47:44,420 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.small.0-1731883664417,5,FailOnTimeoutGroup] 2024-11-17T22:47:44,421 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:44,421 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-17T22:47:44,421 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:44,421 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
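Editor's note: the hbase:meta table descriptor printed just above lists the per-family attributes (VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING, IN_MEMORY, BLOCKSIZE) that FSTableDescriptors persists. As a minimal, illustrative sketch only — not the code path FSTableDescriptors actually runs — here is how the 'info' family's attributes from that log entry would map onto the HBase 2.x+ client builder API:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaDescriptorSketch {
  public static void main(String[] args) {
    // 'info' family as printed in the log: 3 versions, ROWCOL bloom filter,
    // ROW_INDEX_V1 block encoding, in-memory, 8 KB block size.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .build();

    // Illustrative only: hbase:meta itself is created internally by
    // InitMetaProcedure, not by user code assembling a descriptor like this.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("hbase", "meta"))
        .setColumnFamily(info)
        .build();

    System.out.println(td);
  }
}
```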
2024-11-17T22:47:44,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42011 is added to blk_1073741831_1007 (size=1321) 2024-11-17T22:47:44,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34923 is added to blk_1073741831_1007 (size=1321) 2024-11-17T22:47:44,427 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-17T22:47:44,428 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048 2024-11-17T22:47:44,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42011 is added to blk_1073741832_1008 (size=32) 2024-11-17T22:47:44,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34923 is added to blk_1073741832_1008 (size=32) 2024-11-17T22:47:44,440 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:47:44,441 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T22:47:44,442 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T22:47:44,443 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:47:44,443 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:47:44,443 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T22:47:44,445 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T22:47:44,445 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:47:44,446 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:47:44,446 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T22:47:44,447 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T22:47:44,448 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:47:44,448 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:47:44,448 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T22:47:44,450 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T22:47:44,450 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:47:44,451 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:47:44,451 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T22:47:44,452 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/hbase/meta/1588230740 2024-11-17T22:47:44,452 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/hbase/meta/1588230740 2024-11-17T22:47:44,454 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T22:47:44,454 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T22:47:44,455 INFO [RS:0;1a6e40b21a48:45593 {}] regionserver.HRegionServer(746): ClusterId : c03b3dfd-a67c-435f-ae80-b49811de8429 2024-11-17T22:47:44,455 DEBUG [RS:0;1a6e40b21a48:45593 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-17T22:47:44,455 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
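Editor's note: the two FlushLargeStoresPolicy lines in this section (32.0 M for master:store, 16.0 M above for hbase:meta) both come from the fallback the message itself states: when hbase.hregion.percolumnfamilyflush.size.lower.bound is absent from the table descriptor, the lower bound is the region's memstore flush size divided by its number of column families. A back-of-the-envelope sketch of that arithmetic, using only numbers printed in (or implied by) these log entries:

```java
/**
 * Sanity check of the fallback described in the log:
 * lowerBound = memstoreFlushSize / numberOfFamilies.
 * The constants are taken from, or implied by, the log entries above;
 * nothing here is read from a live configuration.
 */
public class FlushLowerBoundSketch {
  static long fallbackLowerBound(long memstoreFlushSize, int numFamilies) {
    return memstoreFlushSize / numFamilies;
  }

  public static void main(String[] args) {
    // master:store region: flushSize=134217728 (128 MB), families info/proc/rs/state.
    System.out.println(fallbackLowerBound(134_217_728L, 4)); // 33554432 -> "32.0 M"

    // hbase:meta region: flushSizeLowerBound=16777216 (16 MB) with families
    // info/ns/rep_barrier/table, implying a 64 MB region flush size.
    System.out.println(fallbackLowerBound(67_108_864L, 4)); // 16777216 -> "16.0 M"
  }
}
```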
2024-11-17T22:47:44,457 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T22:47:44,457 DEBUG [RS:0;1a6e40b21a48:45593 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-17T22:47:44,457 DEBUG [RS:0;1a6e40b21a48:45593 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-17T22:47:44,459 DEBUG [RS:0;1a6e40b21a48:45593 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-17T22:47:44,459 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T22:47:44,459 DEBUG [RS:0;1a6e40b21a48:45593 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b567beb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a6e40b21a48/172.17.0.2:0 2024-11-17T22:47:44,460 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=820809, jitterRate=0.04371313750743866}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T22:47:44,461 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731883664440Initializing all the Stores at 1731883664441 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883664441Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883664441Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883664441Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883664441Cleaning up temporary data from old regions at 1731883664454 (+13 ms)Region opened successfully at 1731883664461 (+7 ms) 2024-11-17T22:47:44,461 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T22:47:44,461 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region 
hbase:meta,,1.1588230740 2024-11-17T22:47:44,461 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T22:47:44,461 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T22:47:44,461 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T22:47:44,461 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T22:47:44,462 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731883664461Disabling compacts and flushes for region at 1731883664461Disabling writes for close at 1731883664461Writing region close event to WAL at 1731883664461Closed at 1731883664461 2024-11-17T22:47:44,463 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T22:47:44,463 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-17T22:47:44,463 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-17T22:47:44,464 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T22:47:44,465 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-17T22:47:44,476 DEBUG [RS:0;1a6e40b21a48:45593 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;1a6e40b21a48:45593 2024-11-17T22:47:44,476 INFO [RS:0;1a6e40b21a48:45593 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-17T22:47:44,476 INFO [RS:0;1a6e40b21a48:45593 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-17T22:47:44,476 DEBUG [RS:0;1a6e40b21a48:45593 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-17T22:47:44,477 INFO [RS:0;1a6e40b21a48:45593 {}] regionserver.HRegionServer(2659): reportForDuty to master=1a6e40b21a48,44599,1731883664183 with port=45593, startcode=1731883664234 2024-11-17T22:47:44,477 DEBUG [RS:0;1a6e40b21a48:45593 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-17T22:47:44,479 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36999, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-17T22:47:44,479 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44599 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1a6e40b21a48,45593,1731883664234 2024-11-17T22:47:44,480 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44599 {}] master.ServerManager(517): Registering regionserver=1a6e40b21a48,45593,1731883664234 2024-11-17T22:47:44,481 DEBUG [RS:0;1a6e40b21a48:45593 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048 2024-11-17T22:47:44,481 DEBUG [RS:0;1a6e40b21a48:45593 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39901 2024-11-17T22:47:44,481 DEBUG [RS:0;1a6e40b21a48:45593 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-17T22:47:44,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44599-0x1004fdedf950000, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T22:47:44,483 DEBUG [RS:0;1a6e40b21a48:45593 {}] zookeeper.ZKUtil(111): regionserver:45593-0x1004fdedf950001, quorum=127.0.0.1:51766, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1a6e40b21a48,45593,1731883664234 2024-11-17T22:47:44,483 WARN [RS:0;1a6e40b21a48:45593 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-17T22:47:44,483 INFO [RS:0;1a6e40b21a48:45593 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T22:47:44,483 DEBUG [RS:0;1a6e40b21a48:45593 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234 2024-11-17T22:47:44,484 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1a6e40b21a48,45593,1731883664234] 2024-11-17T22:47:44,487 INFO [RS:0;1a6e40b21a48:45593 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-17T22:47:44,490 INFO [RS:0;1a6e40b21a48:45593 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-17T22:47:44,490 INFO [RS:0;1a6e40b21a48:45593 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-17T22:47:44,490 INFO [RS:0;1a6e40b21a48:45593 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
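Editor's note: the registration sequence above (reportForDuty, the watcher set on /hbase/rs/1a6e40b21a48,45593,1731883664234, and RegionServerTracker adding the server after a NodeChildrenChanged event on /hbase/rs) follows the standard ZooKeeper membership pattern: each regionserver creates an ephemeral znode under /hbase/rs and the master keeps a children watch on that parent. A minimal sketch of that pattern with the plain ZooKeeper client API, assuming the base path already exists and using a hypothetical quorum address; it mirrors the mechanism only, not HBase's ZKWatcher/RegionServerTracker classes:

```java
import java.util.List;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.Watcher.Event.EventType;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class RsTrackerSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical quorum address; the test above uses 127.0.0.1:51766.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> {
      // The master sees events like the NodeChildrenChanged on /hbase/rs
      // logged above and re-lists the children to learn about new servers.
      if (event.getType() == EventType.NodeChildrenChanged) {
        System.out.println("children changed under " + event.getPath());
      }
    });

    // Regionserver side: an ephemeral node that disappears when the ZK session
    // dies, which is how the master later detects a crashed server.
    zk.create("/hbase/rs/host,45593,1731883664234", new byte[0],
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    // Master side: list the current servers and (re)arm the children watch.
    List<String> servers = zk.getChildren("/hbase/rs", true);
    System.out.println("live regionservers: " + servers);

    zk.close();
  }
}
```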
2024-11-17T22:47:44,490 INFO [RS:0;1a6e40b21a48:45593 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-17T22:47:44,491 INFO [RS:0;1a6e40b21a48:45593 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-17T22:47:44,491 INFO [RS:0;1a6e40b21a48:45593 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:44,491 DEBUG [RS:0;1a6e40b21a48:45593 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:47:44,491 DEBUG [RS:0;1a6e40b21a48:45593 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:47:44,492 DEBUG [RS:0;1a6e40b21a48:45593 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:47:44,492 DEBUG [RS:0;1a6e40b21a48:45593 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:47:44,492 DEBUG [RS:0;1a6e40b21a48:45593 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:47:44,492 DEBUG [RS:0;1a6e40b21a48:45593 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1a6e40b21a48:0, corePoolSize=2, maxPoolSize=2 2024-11-17T22:47:44,492 DEBUG [RS:0;1a6e40b21a48:45593 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:47:44,492 DEBUG [RS:0;1a6e40b21a48:45593 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:47:44,492 DEBUG [RS:0;1a6e40b21a48:45593 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:47:44,492 DEBUG [RS:0;1a6e40b21a48:45593 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:47:44,492 DEBUG [RS:0;1a6e40b21a48:45593 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:47:44,492 DEBUG [RS:0;1a6e40b21a48:45593 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:47:44,492 DEBUG [RS:0;1a6e40b21a48:45593 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1a6e40b21a48:0, corePoolSize=3, maxPoolSize=3 2024-11-17T22:47:44,492 DEBUG [RS:0;1a6e40b21a48:45593 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0, corePoolSize=3, maxPoolSize=3 2024-11-17T22:47:44,493 INFO [RS:0;1a6e40b21a48:45593 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-17T22:47:44,493 INFO [RS:0;1a6e40b21a48:45593 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:44,493 INFO [RS:0;1a6e40b21a48:45593 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:44,493 INFO [RS:0;1a6e40b21a48:45593 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:44,493 INFO [RS:0;1a6e40b21a48:45593 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:44,493 INFO [RS:0;1a6e40b21a48:45593 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,45593,1731883664234-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T22:47:44,508 INFO [RS:0;1a6e40b21a48:45593 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-17T22:47:44,508 INFO [RS:0;1a6e40b21a48:45593 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,45593,1731883664234-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:44,508 INFO [RS:0;1a6e40b21a48:45593 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:44,508 INFO [RS:0;1a6e40b21a48:45593 {}] regionserver.Replication(171): 1a6e40b21a48,45593,1731883664234 started 2024-11-17T22:47:44,522 INFO [RS:0;1a6e40b21a48:45593 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:44,522 INFO [RS:0;1a6e40b21a48:45593 {}] regionserver.HRegionServer(1482): Serving as 1a6e40b21a48,45593,1731883664234, RpcServer on 1a6e40b21a48/172.17.0.2:45593, sessionid=0x1004fdedf950001 2024-11-17T22:47:44,522 DEBUG [RS:0;1a6e40b21a48:45593 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-17T22:47:44,522 DEBUG [RS:0;1a6e40b21a48:45593 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1a6e40b21a48,45593,1731883664234 2024-11-17T22:47:44,522 DEBUG [RS:0;1a6e40b21a48:45593 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a6e40b21a48,45593,1731883664234' 2024-11-17T22:47:44,522 DEBUG [RS:0;1a6e40b21a48:45593 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-17T22:47:44,523 DEBUG [RS:0;1a6e40b21a48:45593 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-17T22:47:44,523 DEBUG [RS:0;1a6e40b21a48:45593 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-17T22:47:44,523 DEBUG [RS:0;1a6e40b21a48:45593 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-17T22:47:44,523 DEBUG [RS:0;1a6e40b21a48:45593 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1a6e40b21a48,45593,1731883664234 2024-11-17T22:47:44,523 DEBUG [RS:0;1a6e40b21a48:45593 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a6e40b21a48,45593,1731883664234' 2024-11-17T22:47:44,524 DEBUG [RS:0;1a6e40b21a48:45593 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-17T22:47:44,524 DEBUG 
[RS:0;1a6e40b21a48:45593 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-17T22:47:44,525 DEBUG [RS:0;1a6e40b21a48:45593 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-17T22:47:44,525 INFO [RS:0;1a6e40b21a48:45593 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-17T22:47:44,525 INFO [RS:0;1a6e40b21a48:45593 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-17T22:47:44,616 WARN [1a6e40b21a48:44599 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-17T22:47:44,629 INFO [RS:0;1a6e40b21a48:45593 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a6e40b21a48%2C45593%2C1731883664234, suffix=, logDir=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234, archiveDir=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/oldWALs, maxLogs=32 2024-11-17T22:47:44,632 INFO [RS:0;1a6e40b21a48:45593 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C45593%2C1731883664234.1731883664631 2024-11-17T22:47:44,642 INFO [RS:0;1a6e40b21a48:45593 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883664631 2024-11-17T22:47:44,644 DEBUG [RS:0;1a6e40b21a48:45593 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43281:43281),(127.0.0.1/127.0.0.1:42065:42065)] 2024-11-17T22:47:44,866 DEBUG [1a6e40b21a48:44599 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-17T22:47:44,867 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1a6e40b21a48,45593,1731883664234 2024-11-17T22:47:44,871 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1a6e40b21a48,45593,1731883664234, state=OPENING 2024-11-17T22:47:44,874 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-17T22:47:44,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45593-0x1004fdedf950001, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:47:44,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44599-0x1004fdedf950000, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:47:44,877 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T22:47:44,877 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T22:47:44,877 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, 
server=1a6e40b21a48,45593,1731883664234}] 2024-11-17T22:47:44,877 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T22:47:44,902 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T22:47:44,903 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-17T22:47:44,904 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-17T22:47:45,033 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-17T22:47:45,038 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32821, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-17T22:47:45,044 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-17T22:47:45,044 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T22:47:45,048 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a6e40b21a48%2C45593%2C1731883664234.meta, suffix=.meta, logDir=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234, archiveDir=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/oldWALs, maxLogs=32 2024-11-17T22:47:45,049 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta 2024-11-17T22:47:45,056 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta 2024-11-17T22:47:45,058 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43281:43281),(127.0.0.1/127.0.0.1:42065:42065)] 2024-11-17T22:47:45,059 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-17T22:47:45,059 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-17T22:47:45,059 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered 
coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-17T22:47:45,060 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-17T22:47:45,060 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-17T22:47:45,060 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:47:45,060 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-17T22:47:45,060 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-17T22:47:45,062 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T22:47:45,063 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T22:47:45,064 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:47:45,064 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:47:45,064 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T22:47:45,065 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy 
for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T22:47:45,066 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:47:45,066 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:47:45,066 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T22:47:45,067 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T22:47:45,067 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:47:45,068 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:47:45,068 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T22:47:45,069 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T22:47:45,069 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:47:45,070 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:47:45,070 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T22:47:45,071 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/hbase/meta/1588230740 2024-11-17T22:47:45,073 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/hbase/meta/1588230740 2024-11-17T22:47:45,074 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T22:47:45,074 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T22:47:45,075 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-17T22:47:45,076 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T22:47:45,077 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=822854, jitterRate=0.04631297290325165}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T22:47:45,077 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-17T22:47:45,078 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731883665060Writing region info on filesystem at 1731883665060Initializing all the Stores at 1731883665062 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883665062Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B 
(8KB)'} at 1731883665062Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883665062Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883665062Cleaning up temporary data from old regions at 1731883665074 (+12 ms)Running coprocessor post-open hooks at 1731883665077 (+3 ms)Region opened successfully at 1731883665078 (+1 ms) 2024-11-17T22:47:45,079 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731883665032 2024-11-17T22:47:45,082 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-17T22:47:45,082 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-17T22:47:45,083 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=1a6e40b21a48,45593,1731883664234 2024-11-17T22:47:45,084 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1a6e40b21a48,45593,1731883664234, state=OPEN 2024-11-17T22:47:45,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45593-0x1004fdedf950001, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T22:47:45,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44599-0x1004fdedf950000, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T22:47:45,086 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=1a6e40b21a48,45593,1731883664234 2024-11-17T22:47:45,086 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T22:47:45,086 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T22:47:45,090 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-17T22:47:45,090 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=1a6e40b21a48,45593,1731883664234 in 209 msec 2024-11-17T22:47:45,092 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished 
subprocedure pid=2, resume processing ppid=1 2024-11-17T22:47:45,093 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 627 msec 2024-11-17T22:47:45,094 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T22:47:45,094 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-17T22:47:45,095 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T22:47:45,095 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a6e40b21a48,45593,1731883664234, seqNum=-1] 2024-11-17T22:47:45,096 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T22:47:45,097 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37257, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T22:47:45,104 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 691 msec 2024-11-17T22:47:45,104 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731883665104, completionTime=-1 2024-11-17T22:47:45,104 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-17T22:47:45,104 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-17T22:47:45,106 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-17T22:47:45,106 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731883725106 2024-11-17T22:47:45,106 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731883785106 2024-11-17T22:47:45,106 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-17T22:47:45,106 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,44599,1731883664183-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:45,106 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,44599,1731883664183-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-17T22:47:45,106 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,44599,1731883664183-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-17T22:47:45,106 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-1a6e40b21a48:44599, period=300000, unit=MILLISECONDS is enabled.
2024-11-17T22:47:45,107 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-11-17T22:47:45,107 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled.
2024-11-17T22:47:45,109 DEBUG [master/1a6e40b21a48:0.Chore.1 {}] janitor.CatalogJanitor(180):
2024-11-17T22:47:45,110 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.840sec
2024-11-17T22:47:45,111 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-11-17T22:47:45,111 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-11-17T22:47:45,111 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-11-17T22:47:45,111 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-11-17T22:47:45,111 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-11-17T22:47:45,111 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,44599,1731883664183-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-17T22:47:45,111 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,44599,1731883664183-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-11-17T22:47:45,113 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds
2024-11-17T22:47:45,113 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-11-17T22:47:45,114 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,44599,1731883664183-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-17T22:47:45,172 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e8113bb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T22:47:45,173 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 1a6e40b21a48,44599,-1 for getting cluster id 2024-11-17T22:47:45,173 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-17T22:47:45,174 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c03b3dfd-a67c-435f-ae80-b49811de8429' 2024-11-17T22:47:45,175 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-17T22:47:45,175 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c03b3dfd-a67c-435f-ae80-b49811de8429" 2024-11-17T22:47:45,175 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a5afd13, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T22:47:45,175 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a6e40b21a48,44599,-1] 2024-11-17T22:47:45,175 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-17T22:47:45,175 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:47:45,177 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48378, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-17T22:47:45,178 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10c7ae56, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T22:47:45,178 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T22:47:45,179 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a6e40b21a48,45593,1731883664234, seqNum=-1] 2024-11-17T22:47:45,179 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T22:47:45,181 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60084, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T22:47:45,183 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=1a6e40b21a48,44599,1731883664183 2024-11-17T22:47:45,183 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:47:45,186 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-17T22:47:45,200 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1a6e40b21a48:0 server-side Connection retries=45 2024-11-17T22:47:45,200 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T22:47:45,200 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T22:47:45,200 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T22:47:45,201 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T22:47:45,201 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T22:47:45,201 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-17T22:47:45,201 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T22:47:45,201 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43411 2024-11-17T22:47:45,203 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43411 connecting to ZooKeeper ensemble=127.0.0.1:51766 2024-11-17T22:47:45,203 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:47:45,205 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:47:45,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:434110x0, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T22:47:45,208 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43411-0x1004fdedf950002 connected 2024-11-17T22:47:45,208 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:43411-0x1004fdedf950002, quorum=127.0.0.1:51766, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-17T22:47:45,208 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-17T22:47:45,209 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-17T22:47:45,210 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 
2024-11-17T22:47:45,210 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:43411-0x1004fdedf950002, quorum=127.0.0.1:51766, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-11-17T22:47:45,212 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43411-0x1004fdedf950002, quorum=127.0.0.1:51766, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-17T22:47:45,213 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43411
2024-11-17T22:47:45,214 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43411
2024-11-17T22:47:45,216 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43411
2024-11-17T22:47:45,217 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43411
2024-11-17T22:47:45,217 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43411
2024-11-17T22:47:45,218 INFO [RS:1;1a6e40b21a48:43411 {}] regionserver.HRegionServer(746): ClusterId : c03b3dfd-a67c-435f-ae80-b49811de8429
2024-11-17T22:47:45,218 DEBUG [RS:1;1a6e40b21a48:43411 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-11-17T22:47:45,220 DEBUG [RS:1;1a6e40b21a48:43411 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-11-17T22:47:45,220 DEBUG [RS:1;1a6e40b21a48:43411 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-11-17T22:47:45,221 DEBUG [RS:1;1a6e40b21a48:43411 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-11-17T22:47:45,222 DEBUG [RS:1;1a6e40b21a48:43411 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36f06d27, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a6e40b21a48/172.17.0.2:0
2024-11-17T22:47:45,233 DEBUG [RS:1;1a6e40b21a48:43411 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;1a6e40b21a48:43411
2024-11-17T22:47:45,233 INFO [RS:1;1a6e40b21a48:43411 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-11-17T22:47:45,233 INFO [RS:1;1a6e40b21a48:43411 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-11-17T22:47:45,233 DEBUG [RS:1;1a6e40b21a48:43411 {}] regionserver.HRegionServer(832): About to register with Master.
2024-11-17T22:47:45,234 INFO [RS:1;1a6e40b21a48:43411 {}] regionserver.HRegionServer(2659): reportForDuty to master=1a6e40b21a48,44599,1731883664183 with port=43411, startcode=1731883665200
2024-11-17T22:47:45,234 DEBUG [RS:1;1a6e40b21a48:43411 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-17T22:47:45,235 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59735, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService
2024-11-17T22:47:45,236 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44599 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1a6e40b21a48,43411,1731883665200
2024-11-17T22:47:45,236 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44599 {}] master.ServerManager(517): Registering regionserver=1a6e40b21a48,43411,1731883665200
2024-11-17T22:47:45,237 DEBUG [RS:1;1a6e40b21a48:43411 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048
2024-11-17T22:47:45,237 DEBUG [RS:1;1a6e40b21a48:43411 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39901
2024-11-17T22:47:45,237 DEBUG [RS:1;1a6e40b21a48:43411 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-11-17T22:47:45,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44599-0x1004fdedf950000, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-17T22:47:45,239 DEBUG [RS:1;1a6e40b21a48:43411 {}] zookeeper.ZKUtil(111): regionserver:43411-0x1004fdedf950002, quorum=127.0.0.1:51766, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1a6e40b21a48,43411,1731883665200
2024-11-17T22:47:45,239 WARN [RS:1;1a6e40b21a48:43411 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-17T22:47:45,239 INFO [RS:1;1a6e40b21a48:43411 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-17T22:47:45,239 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1a6e40b21a48,43411,1731883665200]
2024-11-17T22:47:45,239 DEBUG [RS:1;1a6e40b21a48:43411 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200
2024-11-17T22:47:45,243 INFO [RS:1;1a6e40b21a48:43411 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-11-17T22:47:45,246 INFO [RS:1;1a6e40b21a48:43411 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-11-17T22:47:45,246 INFO [RS:1;1a6e40b21a48:43411 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-17T22:47:45,246 INFO [RS:1;1a6e40b21a48:43411 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-17T22:47:45,246 INFO [RS:1;1a6e40b21a48:43411 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S
2024-11-17T22:47:45,247 INFO [RS:1;1a6e40b21a48:43411 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec
2024-11-17T22:47:45,247 INFO [RS:1;1a6e40b21a48:43411 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-11-17T22:47:45,248 DEBUG [RS:1;1a6e40b21a48:43411 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1
2024-11-17T22:47:45,248 DEBUG [RS:1;1a6e40b21a48:43411 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1
2024-11-17T22:47:45,248 DEBUG [RS:1;1a6e40b21a48:43411 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1
2024-11-17T22:47:45,248 DEBUG [RS:1;1a6e40b21a48:43411 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1
2024-11-17T22:47:45,248 DEBUG [RS:1;1a6e40b21a48:43411 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1
2024-11-17T22:47:45,248 DEBUG [RS:1;1a6e40b21a48:43411 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1a6e40b21a48:0, corePoolSize=2, maxPoolSize=2
2024-11-17T22:47:45,248 DEBUG [RS:1;1a6e40b21a48:43411 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1
2024-11-17T22:47:45,248 DEBUG [RS:1;1a6e40b21a48:43411 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1
2024-11-17T22:47:45,248 DEBUG [RS:1;1a6e40b21a48:43411 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1
2024-11-17T22:47:45,248 DEBUG [RS:1;1a6e40b21a48:43411 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1
2024-11-17T22:47:45,248 DEBUG [RS:1;1a6e40b21a48:43411 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1
2024-11-17T22:47:45,249 DEBUG [RS:1;1a6e40b21a48:43411 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1
2024-11-17T22:47:45,249 DEBUG [RS:1;1a6e40b21a48:43411 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1a6e40b21a48:0, corePoolSize=3, maxPoolSize=3
2024-11-17T22:47:45,249 DEBUG [RS:1;1a6e40b21a48:43411 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0, corePoolSize=3, maxPoolSize=3
2024-11-17T22:47:45,249 INFO [RS:1;1a6e40b21a48:43411 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-11-17T22:47:45,249 INFO [RS:1;1a6e40b21a48:43411 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:45,250 INFO [RS:1;1a6e40b21a48:43411 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:45,250 INFO [RS:1;1a6e40b21a48:43411 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:45,250 INFO [RS:1;1a6e40b21a48:43411 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:45,250 INFO [RS:1;1a6e40b21a48:43411 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,43411,1731883665200-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T22:47:45,269 INFO [RS:1;1a6e40b21a48:43411 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-17T22:47:45,269 INFO [RS:1;1a6e40b21a48:43411 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,43411,1731883665200-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:45,270 INFO [RS:1;1a6e40b21a48:43411 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:45,270 INFO [RS:1;1a6e40b21a48:43411 {}] regionserver.Replication(171): 1a6e40b21a48,43411,1731883665200 started 2024-11-17T22:47:45,282 INFO [RS:1;1a6e40b21a48:43411 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:47:45,282 INFO [RS:1;1a6e40b21a48:43411 {}] regionserver.HRegionServer(1482): Serving as 1a6e40b21a48,43411,1731883665200, RpcServer on 1a6e40b21a48/172.17.0.2:43411, sessionid=0x1004fdedf950002 2024-11-17T22:47:45,282 DEBUG [RS:1;1a6e40b21a48:43411 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-17T22:47:45,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;1a6e40b21a48:43411,5,FailOnTimeoutGroup] 2024-11-17T22:47:45,283 DEBUG [RS:1;1a6e40b21a48:43411 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1a6e40b21a48,43411,1731883665200 2024-11-17T22:47:45,283 DEBUG [RS:1;1a6e40b21a48:43411 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a6e40b21a48,43411,1731883665200' 2024-11-17T22:47:45,283 DEBUG [RS:1;1a6e40b21a48:43411 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-17T22:47:45,283 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-17T22:47:45,283 DEBUG [RS:1;1a6e40b21a48:43411 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-17T22:47:45,283 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-17T22:47:45,284 DEBUG [RS:1;1a6e40b21a48:43411 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-17T22:47:45,284 DEBUG [RS:1;1a6e40b21a48:43411 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-17T22:47:45,284 DEBUG [RS:1;1a6e40b21a48:43411 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
1a6e40b21a48,43411,1731883665200 2024-11-17T22:47:45,284 DEBUG [RS:1;1a6e40b21a48:43411 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a6e40b21a48,43411,1731883665200' 2024-11-17T22:47:45,284 DEBUG [RS:1;1a6e40b21a48:43411 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-17T22:47:45,284 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 1a6e40b21a48,44599,1731883664183 2024-11-17T22:47:45,284 DEBUG [RS:1;1a6e40b21a48:43411 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-17T22:47:45,284 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5fdfd43b 2024-11-17T22:47:45,284 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-17T22:47:45,285 DEBUG [RS:1;1a6e40b21a48:43411 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-17T22:47:45,285 INFO [RS:1;1a6e40b21a48:43411 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-17T22:47:45,285 INFO [RS:1;1a6e40b21a48:43411 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-17T22:47:45,286 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48388, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-17T22:47:45,287 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44599 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-17T22:47:45,287 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44599 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-17T22:47:45,287 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44599 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T22:47:45,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44599 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-17T22:47:45,290 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-17T22:47:45,291 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:47:45,291 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44599 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-17T22:47:45,292 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-17T22:47:45,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44599 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-17T22:47:45,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34923 is added to blk_1073741835_1011 (size=393) 2024-11-17T22:47:45,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42011 is added to blk_1073741835_1011 (size=393) 2024-11-17T22:47:45,300 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4ac114b5e436507fa65c786b22107866, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048 2024-11-17T22:47:45,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34923 is added to blk_1073741836_1012 (size=76) 2024-11-17T22:47:45,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42011 is added to blk_1073741836_1012 (size=76) 2024-11-17T22:47:45,308 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:47:45,308 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 4ac114b5e436507fa65c786b22107866, disabling compactions & flushes 2024-11-17T22:47:45,308 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866. 2024-11-17T22:47:45,308 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866. 2024-11-17T22:47:45,308 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866. after waiting 0 ms 2024-11-17T22:47:45,308 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866. 2024-11-17T22:47:45,308 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866. 2024-11-17T22:47:45,308 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4ac114b5e436507fa65c786b22107866: Waiting for close lock at 1731883665308Disabling compacts and flushes for region at 1731883665308Disabling writes for close at 1731883665308Writing region close event to WAL at 1731883665308Closed at 1731883665308 2024-11-17T22:47:45,310 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-17T22:47:45,310 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731883665310"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731883665310"}]},"ts":"1731883665310"} 2024-11-17T22:47:45,312 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-17T22:47:45,314 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-11-17T22:47:45,314 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731883665314"}]},"ts":"1731883665314"}
2024-11-17T22:47:45,316 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta
2024-11-17T22:47:45,316 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=4ac114b5e436507fa65c786b22107866, ASSIGN}]
2024-11-17T22:47:45,318 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=4ac114b5e436507fa65c786b22107866, ASSIGN
2024-11-17T22:47:45,319 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=4ac114b5e436507fa65c786b22107866, ASSIGN; state=OFFLINE, location=1a6e40b21a48,45593,1731883664234; forceNewPlan=false, retain=false
2024-11-17T22:47:45,390 INFO [RS:1;1a6e40b21a48:43411 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a6e40b21a48%2C43411%2C1731883665200, suffix=, logDir=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200, archiveDir=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/oldWALs, maxLogs=32
2024-11-17T22:47:45,392 INFO [RS:1;1a6e40b21a48:43411 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C43411%2C1731883665200.1731883665392
2024-11-17T22:47:45,400 INFO [RS:1;1a6e40b21a48:43411 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392
2024-11-17T22:47:45,401 DEBUG [RS:1;1a6e40b21a48:43411 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43281:43281),(127.0.0.1/127.0.0.1:42065:42065)]
2024-11-17T22:47:45,452 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-17T22:47:45,461 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-17T22:47:45,470 INFO [1a6e40b21a48:44599 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment.
2024-11-17T22:47:45,470 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4ac114b5e436507fa65c786b22107866, regionState=OPENING, regionLocation=1a6e40b21a48,45593,1731883664234 2024-11-17T22:47:45,473 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=4ac114b5e436507fa65c786b22107866, ASSIGN because future has completed 2024-11-17T22:47:45,474 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4ac114b5e436507fa65c786b22107866, server=1a6e40b21a48,45593,1731883664234}] 2024-11-17T22:47:45,632 INFO [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866. 2024-11-17T22:47:45,632 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 4ac114b5e436507fa65c786b22107866, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866.', STARTKEY => '', ENDKEY => ''} 2024-11-17T22:47:45,633 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 4ac114b5e436507fa65c786b22107866 2024-11-17T22:47:45,633 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:47:45,633 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 4ac114b5e436507fa65c786b22107866 2024-11-17T22:47:45,633 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 4ac114b5e436507fa65c786b22107866 2024-11-17T22:47:45,635 INFO [StoreOpener-4ac114b5e436507fa65c786b22107866-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 4ac114b5e436507fa65c786b22107866 2024-11-17T22:47:45,636 INFO [StoreOpener-4ac114b5e436507fa65c786b22107866-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4ac114b5e436507fa65c786b22107866 columnFamilyName info 2024-11-17T22:47:45,637 DEBUG [StoreOpener-4ac114b5e436507fa65c786b22107866-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:47:45,637 INFO [StoreOpener-4ac114b5e436507fa65c786b22107866-1 {}] regionserver.HStore(327): Store=4ac114b5e436507fa65c786b22107866/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T22:47:45,637 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 4ac114b5e436507fa65c786b22107866 2024-11-17T22:47:45,639 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866 2024-11-17T22:47:45,639 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866 2024-11-17T22:47:45,640 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 4ac114b5e436507fa65c786b22107866 2024-11-17T22:47:45,640 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 4ac114b5e436507fa65c786b22107866 2024-11-17T22:47:45,642 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 4ac114b5e436507fa65c786b22107866 2024-11-17T22:47:45,645 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T22:47:45,645 INFO [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 4ac114b5e436507fa65c786b22107866; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=701582, jitterRate=-0.1078932136297226}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-17T22:47:45,646 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4ac114b5e436507fa65c786b22107866 2024-11-17T22:47:45,647 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 4ac114b5e436507fa65c786b22107866: Running coprocessor pre-open hook at 1731883665633Writing region info on filesystem at 1731883665633Initializing all the Stores at 1731883665634 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883665634Cleaning up temporary data from old regions at 1731883665640 (+6 ms)Running coprocessor post-open hooks at 1731883665646 (+6 ms)Region opened successfully at 1731883665647 (+1 ms) 2024-11-17T22:47:45,648 INFO [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866., pid=6, masterSystemTime=1731883665627 2024-11-17T22:47:45,651 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866. 2024-11-17T22:47:45,651 INFO [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866. 2024-11-17T22:47:45,652 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4ac114b5e436507fa65c786b22107866, regionState=OPEN, openSeqNum=2, regionLocation=1a6e40b21a48,45593,1731883664234 2024-11-17T22:47:45,655 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4ac114b5e436507fa65c786b22107866, server=1a6e40b21a48,45593,1731883664234 because future has completed 2024-11-17T22:47:45,660 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-17T22:47:45,660 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 4ac114b5e436507fa65c786b22107866, server=1a6e40b21a48,45593,1731883664234 in 183 msec 2024-11-17T22:47:45,663 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-17T22:47:45,663 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=4ac114b5e436507fa65c786b22107866, ASSIGN in 344 msec 2024-11-17T22:47:45,664 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-17T22:47:45,665 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731883665664"}]},"ts":"1731883665664"} 2024-11-17T22:47:45,667 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-17T22:47:45,668 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-17T22:47:45,671 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 382 msec 2024-11-17T22:47:45,977 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-17T22:47:45,980 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:47:46,000 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:47:46,002 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:47:46,002 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:47:50,488 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-17T22:47:51,136 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-17T22:47:51,137 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:47:51,155 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:47:51,159 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:47:51,160 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:47:54,902 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-17T22:47:54,902 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-17T22:47:54,904 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-17T22:47:54,904 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-17T22:47:54,905 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T22:47:54,905 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-17T22:47:55,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44599 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-17T22:47:55,311 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-17T22:47:55,311 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-17T22:47:55,315 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-17T22:47:55,315 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866. 2024-11-17T22:47:55,326 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:47:55,330 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T22:47:55,330 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T22:47:55,330 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T22:47:55,330 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T22:47:55,331 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d0f4a9a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/hadoop.log.dir/,AVAILABLE} 2024-11-17T22:47:55,331 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ff5703b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T22:47:55,424 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2047cbbb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/java.io.tmpdir/jetty-localhost-41463-hadoop-hdfs-3_4_1-tests_jar-_-any-14726426907351439440/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:47:55,425 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2021586{HTTP/1.1, (http/1.1)}{localhost:41463} 2024-11-17T22:47:55,425 INFO [Time-limited test {}] server.Server(415): Started @116449ms 2024-11-17T22:47:55,426 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T22:47:55,455 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:47:55,458 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T22:47:55,459 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T22:47:55,459 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T22:47:55,459 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T22:47:55,460 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@519de6b2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/hadoop.log.dir/,AVAILABLE} 2024-11-17T22:47:55,460 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@167a7fde{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T22:47:55,483 WARN [Thread-831 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data5/current/BP-567024958-172.17.0.2-1731883663595/current, will proceed with Du for space computation calculation, 2024-11-17T22:47:55,484 WARN [Thread-832 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data6/current/BP-567024958-172.17.0.2-1731883663595/current, will proceed with Du for space computation calculation, 2024-11-17T22:47:55,506 WARN [Thread-811 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T22:47:55,508 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe989ef096b478869 with lease ID 0x94736aff417bc170: Processing first storage report for DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a from datanode DatanodeRegistration(127.0.0.1:36249, datanodeUuid=72a50233-41d5-4607-91e8-9dbeeca91ac4, infoPort=42331, infoSecurePort=0, ipcPort=36921, storageInfo=lv=-57;cid=testClusterID;nsid=889579875;c=1731883663595) 2024-11-17T22:47:55,508 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe989ef096b478869 with lease ID 0x94736aff417bc170: from storage DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a node DatanodeRegistration(127.0.0.1:36249, datanodeUuid=72a50233-41d5-4607-91e8-9dbeeca91ac4, infoPort=42331, infoSecurePort=0, ipcPort=36921, storageInfo=lv=-57;cid=testClusterID;nsid=889579875;c=1731883663595), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:47:55,509 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe989ef096b478869 with lease ID 0x94736aff417bc170: Processing first storage report for DS-df242567-bcc9-4d8d-a654-6fb8b5e27b60 from datanode DatanodeRegistration(127.0.0.1:36249, datanodeUuid=72a50233-41d5-4607-91e8-9dbeeca91ac4, infoPort=42331, infoSecurePort=0, ipcPort=36921, storageInfo=lv=-57;cid=testClusterID;nsid=889579875;c=1731883663595) 2024-11-17T22:47:55,509 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe989ef096b478869 with lease ID 0x94736aff417bc170: from storage DS-df242567-bcc9-4d8d-a654-6fb8b5e27b60 node DatanodeRegistration(127.0.0.1:36249, datanodeUuid=72a50233-41d5-4607-91e8-9dbeeca91ac4, infoPort=42331, infoSecurePort=0, ipcPort=36921, storageInfo=lv=-57;cid=testClusterID;nsid=889579875;c=1731883663595), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:47:55,560 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5b5be5aa{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/java.io.tmpdir/jetty-localhost-42163-hadoop-hdfs-3_4_1-tests_jar-_-any-8128077748802604132/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:47:55,560 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7f7383da{HTTP/1.1, (http/1.1)}{localhost:42163} 2024-11-17T22:47:55,560 INFO [Time-limited test {}] server.Server(415): Started @116584ms 2024-11-17T22:47:55,561 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T22:47:55,602 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:47:55,605 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T22:47:55,609 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T22:47:55,609 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T22:47:55,609 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T22:47:55,610 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@50ff2063{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/hadoop.log.dir/,AVAILABLE} 2024-11-17T22:47:55,610 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1dc59954{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T22:47:55,628 WARN [Thread-866 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data7/current/BP-567024958-172.17.0.2-1731883663595/current, will proceed with Du for space computation calculation, 2024-11-17T22:47:55,628 WARN [Thread-867 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data8/current/BP-567024958-172.17.0.2-1731883663595/current, will proceed with Du for space computation calculation, 2024-11-17T22:47:55,645 WARN [Thread-846 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T22:47:55,647 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xde875602ed49c544 with lease ID 0x94736aff417bc171: Processing first storage report for DS-d72c0fef-aeef-4e18-9b7d-3699c07c5d23 from datanode DatanodeRegistration(127.0.0.1:44801, datanodeUuid=a80cb745-d10d-47bc-83c9-3d8e4e0c1d5a, infoPort=34141, infoSecurePort=0, ipcPort=43729, storageInfo=lv=-57;cid=testClusterID;nsid=889579875;c=1731883663595) 2024-11-17T22:47:55,647 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xde875602ed49c544 with lease ID 0x94736aff417bc171: from storage DS-d72c0fef-aeef-4e18-9b7d-3699c07c5d23 node DatanodeRegistration(127.0.0.1:44801, datanodeUuid=a80cb745-d10d-47bc-83c9-3d8e4e0c1d5a, infoPort=34141, infoSecurePort=0, ipcPort=43729, storageInfo=lv=-57;cid=testClusterID;nsid=889579875;c=1731883663595), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:47:55,647 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xde875602ed49c544 with lease ID 0x94736aff417bc171: Processing first storage report for DS-3203883b-8e7f-4f56-908d-42b7ea385c68 from datanode DatanodeRegistration(127.0.0.1:44801, datanodeUuid=a80cb745-d10d-47bc-83c9-3d8e4e0c1d5a, infoPort=34141, infoSecurePort=0, ipcPort=43729, storageInfo=lv=-57;cid=testClusterID;nsid=889579875;c=1731883663595) 2024-11-17T22:47:55,647 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xde875602ed49c544 with lease ID 0x94736aff417bc171: from storage DS-3203883b-8e7f-4f56-908d-42b7ea385c68 node DatanodeRegistration(127.0.0.1:44801, datanodeUuid=a80cb745-d10d-47bc-83c9-3d8e4e0c1d5a, infoPort=34141, infoSecurePort=0, ipcPort=43729, storageInfo=lv=-57;cid=testClusterID;nsid=889579875;c=1731883663595), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:47:55,707 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@30add41a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/java.io.tmpdir/jetty-localhost-45485-hadoop-hdfs-3_4_1-tests_jar-_-any-5087240283816923438/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:47:55,708 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3fd17220{HTTP/1.1, (http/1.1)}{localhost:45485} 2024-11-17T22:47:55,708 INFO [Time-limited test {}] server.Server(415): Started @116732ms 2024-11-17T22:47:55,709 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-17T22:47:55,769 WARN [Thread-892 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data9/current/BP-567024958-172.17.0.2-1731883663595/current, will proceed with Du for space computation calculation, 2024-11-17T22:47:55,769 WARN [Thread-893 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data10/current/BP-567024958-172.17.0.2-1731883663595/current, will proceed with Du for space computation calculation, 2024-11-17T22:47:55,787 WARN [Thread-881 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-17T22:47:55,790 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2fe98e974464def4 with lease ID 0x94736aff417bc172: Processing first storage report for DS-ec690503-8b72-4dcd-98fc-704818383b02 from datanode DatanodeRegistration(127.0.0.1:36677, datanodeUuid=c3dc079d-ff83-4484-9803-0d11d18f0987, infoPort=37417, infoSecurePort=0, ipcPort=43195, storageInfo=lv=-57;cid=testClusterID;nsid=889579875;c=1731883663595) 2024-11-17T22:47:55,790 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2fe98e974464def4 with lease ID 0x94736aff417bc172: from storage DS-ec690503-8b72-4dcd-98fc-704818383b02 node DatanodeRegistration(127.0.0.1:36677, datanodeUuid=c3dc079d-ff83-4484-9803-0d11d18f0987, infoPort=37417, infoSecurePort=0, ipcPort=43195, storageInfo=lv=-57;cid=testClusterID;nsid=889579875;c=1731883663595), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:47:55,790 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2fe98e974464def4 with lease ID 0x94736aff417bc172: Processing first storage report for DS-34ed0425-b090-461d-a3a5-7cf27797d1bc from datanode DatanodeRegistration(127.0.0.1:36677, datanodeUuid=c3dc079d-ff83-4484-9803-0d11d18f0987, infoPort=37417, infoSecurePort=0, ipcPort=43195, storageInfo=lv=-57;cid=testClusterID;nsid=889579875;c=1731883663595) 2024-11-17T22:47:55,790 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2fe98e974464def4 with lease ID 0x94736aff417bc172: from storage DS-34ed0425-b090-461d-a3a5-7cf27797d1bc node DatanodeRegistration(127.0.0.1:36677, datanodeUuid=c3dc079d-ff83-4484-9803-0d11d18f0987, infoPort=37417, infoSecurePort=0, ipcPort=43195, storageInfo=lv=-57;cid=testClusterID;nsid=889579875;c=1731883663595), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-17T22:47:55,828 WARN [ResponseProcessor for block BP-567024958-172.17.0.2-1731883663595:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-567024958-172.17.0.2-1731883663595:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:47:55,828 WARN [ResponseProcessor for block BP-567024958-172.17.0.2-1731883663595:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-567024958-172.17.0.2-1731883663595:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-567024958-172.17.0.2-1731883663595:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:47:55,828 WARN [ResponseProcessor for block BP-567024958-172.17.0.2-1731883663595:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-567024958-172.17.0.2-1731883663595:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-567024958-172.17.0.2-1731883663595:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:47:55,829 WARN [DataStreamer for file /user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 block BP-567024958-172.17.0.2-1731883663595:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK], DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK]) is bad. 2024-11-17T22:47:55,828 WARN [DataStreamer for file /user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 block BP-567024958-172.17.0.2-1731883663595:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK], DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK]) is bad. 2024-11-17T22:47:55,829 WARN [DataStreamer for file /user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883664631 block BP-567024958-172.17.0.2-1731883663595:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK], DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK]) is bad. 
2024-11-17T22:47:55,829 WARN [PacketResponder: BP-567024958-172.17.0.2-1731883663595:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:42011] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:47:55,830 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-634664490_22 at /127.0.0.1:53160 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:34923:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53160 dst: /127.0.0.1:34923 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T22:47:55,831 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-985877150_22 at /127.0.0.1:53094 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34923:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53094 dst: /127.0.0.1:34923 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:47:55,830 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-985877150_22 at /127.0.0.1:38634 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:42011:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38634 dst: /127.0.0.1:42011 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:47:55,831 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-634664490_22 at /127.0.0.1:38716 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:42011:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38716 dst: /127.0.0.1:42011 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T22:47:55,828 WARN [ResponseProcessor for block BP-567024958-172.17.0.2-1731883663595:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-567024958-172.17.0.2-1731883663595:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-567024958-172.17.0.2-1731883663595:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:47:55,833 WARN [DataStreamer for file /user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta block BP-567024958-172.17.0.2-1731883663595:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK], DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK]) is bad. 2024-11-17T22:47:55,833 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:53114 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34923:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53114 dst: /127.0.0.1:34923 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:47:55,834 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:38660 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:42011:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38660 dst: /127.0.0.1:42011 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:47:55,835 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7330fb3f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:47:55,835 WARN [PacketResponder: BP-567024958-172.17.0.2-1731883663595:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:42011] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:47:55,836 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:53124 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34923:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53124 dst: /127.0.0.1:34923 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T22:47:55,836 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@20aa2ea7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T22:47:55,836 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T22:47:55,837 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ec1a06e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T22:47:55,837 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7eee535{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/hadoop.log.dir/,STOPPED} 2024-11-17T22:47:55,837 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:38674 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:42011:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38674 dst: /127.0.0.1:42011 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T22:47:55,839 WARN [BP-567024958-172.17.0.2-1731883663595 heartbeating to localhost/127.0.0.1:39901 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T22:47:55,839 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-17T22:47:55,839 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T22:47:55,839 WARN [BP-567024958-172.17.0.2-1731883663595 heartbeating to localhost/127.0.0.1:39901 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-567024958-172.17.0.2-1731883663595 (Datanode Uuid 49dcf81b-409b-44ba-a903-7dd5ed0cf385) service to localhost/127.0.0.1:39901 2024-11-17T22:47:55,839 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data3/current/BP-567024958-172.17.0.2-1731883663595 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:47:55,839 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data4/current/BP-567024958-172.17.0.2-1731883663595 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:47:55,840 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T22:47:55,840 WARN [DataStreamer for file /user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883664631 block BP-567024958-172.17.0.2-1731883663595:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:47:55,841 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@37005937 {}] datanode.DataXceiver(331): 127.0.0.1:34923:DataXceiver error processing unknown operation src: /127.0.0.1:47692 dst: /127.0.0.1:34923 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:47:55,842 WARN [DataStreamer for file /user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta block BP-567024958-172.17.0.2-1731883663595:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:47:55,844 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-985877150_22 at /127.0.0.1:47662 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34923:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47662 dst: /127.0.0.1:34923 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:47:55,845 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-634664490_22 at /127.0.0.1:47676 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:34923:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47676 dst: /127.0.0.1:34923 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:47:55,845 WARN [ResponseProcessor for block BP-567024958-172.17.0.2-1731883663595:blk_1073741830_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-567024958-172.17.0.2-1731883663595:blk_1073741830_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T22:47:55,845 WARN [ResponseProcessor for block BP-567024958-172.17.0.2-1731883663595:blk_1073741837_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-567024958-172.17.0.2-1731883663595:blk_1073741837_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:47:55,847 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@14d09ab9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:47:55,847 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@ee6b493{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T22:47:55,847 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T22:47:55,847 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@48743db4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T22:47:55,847 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5a107105{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/hadoop.log.dir/,STOPPED} 2024-11-17T22:47:55,849 WARN [BP-567024958-172.17.0.2-1731883663595 heartbeating to localhost/127.0.0.1:39901 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T22:47:55,849 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T22:47:55,849 WARN [BP-567024958-172.17.0.2-1731883663595 heartbeating to localhost/127.0.0.1:39901 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-567024958-172.17.0.2-1731883663595 (Datanode Uuid e344c866-29df-404c-8553-a89f30d18e1f) service to localhost/127.0.0.1:39901 2024-11-17T22:47:55,849 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T22:47:55,849 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data1/current/BP-567024958-172.17.0.2-1731883663595 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:47:55,850 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data2/current/BP-567024958-172.17.0.2-1731883663595 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:47:55,850 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T22:47:55,854 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866., hostname=1a6e40b21a48,45593,1731883664234, seqNum=2] 2024-11-17T22:47:55,856 ERROR [FSHLog-0-hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048-prefix:1a6e40b21a48,45593,1731883664234 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:47:55,856 WARN [FSHLog-0-hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048-prefix:1a6e40b21a48,45593,1731883664234 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:47:55,856 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:47:55,856 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1a6e40b21a48%2C45593%2C1731883664234:(num 1731883664631) roll requested 2024-11-17T22:47:55,857 INFO [regionserver/1a6e40b21a48:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C45593%2C1731883664234.1731883675856 2024-11-17T22:47:55,860 WARN [Thread-910 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741838_1018 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:47:55,860 WARN [Thread-910 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK], DatanodeInfoWithStorage[127.0.0.1:36677,DS-ec690503-8b72-4dcd-98fc-704818383b02,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK]) is bad. 
2024-11-17T22:47:55,861 WARN [Thread-910 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741838_1018 2024-11-17T22:47:55,863 WARN [Thread-910 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK] 2024-11-17T22:47:55,871 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:55,871 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:55,871 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:55,871 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:55,871 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:55,871 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883664631 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883675856 2024-11-17T22:47:55,872 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:47:55,872 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T22:47:55,873 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-17T22:47:55,873 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-17T22:47:55,874 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883664631 2024-11-17T22:47:55,874 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37417:37417),(127.0.0.1/127.0.0.1:42331:42331)] 2024-11-17T22:47:55,874 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883664631 is not closed yet, will try archiving it next time 2024-11-17T22:47:55,877 WARN [IPC Server handler 0 on default port 39901 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883664631 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741833_1009 2024-11-17T22:47:55,881 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883664631 after 5ms 2024-11-17T22:47:56,315 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:47:57,250 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T22:47:57,875 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:47:57,877 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883675856 2024-11-17T22:47:57,878 WARN [ResponseProcessor for block BP-567024958-172.17.0.2-1731883663595:blk_1073741839_1019 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-567024958-172.17.0.2-1731883663595:blk_1073741839_1019 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:47:57,879 WARN [DataStreamer for file /user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883675856 block BP-567024958-172.17.0.2-1731883663595:blk_1073741839_1019 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741839_1019 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36677,DS-ec690503-8b72-4dcd-98fc-704818383b02,DISK], DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36677,DS-ec690503-8b72-4dcd-98fc-704818383b02,DISK]) is bad. 2024-11-17T22:47:57,880 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:53348 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:36677:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53348 dst: /127.0.0.1:36677 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:47:57,881 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:56008 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:36249:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56008 dst: /127.0.0.1:36249 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T22:47:57,884 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@30add41a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:47:57,884 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3fd17220{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T22:47:57,884 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T22:47:57,885 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1dc59954{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T22:47:57,885 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@50ff2063{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/hadoop.log.dir/,STOPPED} 2024-11-17T22:47:57,886 WARN [BP-567024958-172.17.0.2-1731883663595 heartbeating to localhost/127.0.0.1:39901 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T22:47:57,886 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-17T22:47:57,886 WARN [BP-567024958-172.17.0.2-1731883663595 heartbeating to localhost/127.0.0.1:39901 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-567024958-172.17.0.2-1731883663595 (Datanode Uuid c3dc079d-ff83-4484-9803-0d11d18f0987) service to localhost/127.0.0.1:39901 2024-11-17T22:47:57,886 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T22:47:57,887 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data9/current/BP-567024958-172.17.0.2-1731883663595 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:47:57,887 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data10/current/BP-567024958-172.17.0.2-1731883663595 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:47:57,887 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T22:47:58,316 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:47:59,251 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:47:59,876 WARN [regionserver/1a6e40b21a48:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK]] 2024-11-17T22:47:59,877 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:47:59,877 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1a6e40b21a48%2C45593%2C1731883664234:(num 1731883675856) roll requested 2024-11-17T22:47:59,878 INFO [regionserver/1a6e40b21a48:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C45593%2C1731883664234.1731883679878 2024-11-17T22:47:59,883 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883664631 after 4009ms 2024-11-17T22:47:59,885 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:47:59,885 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36677,DS-ec690503-8b72-4dcd-98fc-704818383b02,DISK], DatanodeInfoWithStorage[127.0.0.1:44801,DS-d72c0fef-aeef-4e18-9b7d-3699c07c5d23,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36677,DS-ec690503-8b72-4dcd-98fc-704818383b02,DISK]) is bad. 2024-11-17T22:47:59,885 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741840_1022 2024-11-17T22:47:59,886 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36677,DS-ec690503-8b72-4dcd-98fc-704818383b02,DISK] 2024-11-17T22:47:59,887 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:47:59,887 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK], DatanodeInfoWithStorage[127.0.0.1:44801,DS-d72c0fef-aeef-4e18-9b7d-3699c07c5d23,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]) is bad. 2024-11-17T22:47:59,887 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741841_1023 2024-11-17T22:47:59,888 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK] 2024-11-17T22:47:59,891 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1024 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42011 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:47:59,891 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:44080 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741842_1024] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data8]'}, localName='127.0.0.1:44801', datanodeUuid='a80cb745-d10d-47bc-83c9-3d8e4e0c1d5a', xmitsInProgress=0}:Exception transferring block BP-567024958-172.17.0.2-1731883663595:blk_1073741842_1024 to mirror 127.0.0.1:42011 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:47:59,891 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44801,DS-d72c0fef-aeef-4e18-9b7d-3699c07c5d23,DISK], DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK]) is bad. 2024-11-17T22:47:59,891 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741842_1024 2024-11-17T22:47:59,891 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:44080 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741842_1024] {}] datanode.BlockReceiver(316): Block 1073741842 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-17T22:47:59,891 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:44080 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:44801:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44080 dst: /127.0.0.1:44801 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:47:59,892 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK] 2024-11-17T22:47:59,892 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-17T22:47:59,898 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:59,899 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:59,899 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:59,899 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:59,899 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:47:59,899 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883675856 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883679878 2024-11-17T22:47:59,900 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34141:34141),(127.0.0.1/127.0.0.1:42331:42331)] 2024-11-17T22:47:59,901 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883664631 is not closed yet, will try archiving it next time 2024-11-17T22:47:59,901 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883675856 is not closed yet, will try archiving it next time 2024-11-17T22:47:59,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36249 is added to blk_1073741839_1021 (size=3600) 2024-11-17T22:48:00,303 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883664631 is not closed yet, will try archiving it next time 2024-11-17T22:48:00,316 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:01,252 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:01,896 WARN [ResponseProcessor for block BP-567024958-172.17.0.2-1731883663595:blk_1073741843_1025 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-567024958-172.17.0.2-1731883663595:blk_1073741843_1025 java.io.IOException: Bad response ERROR for BP-567024958-172.17.0.2-1731883663595:blk_1073741843_1025 from datanode DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:01,897 WARN [DataStreamer for file /user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883679878 block BP-567024958-172.17.0.2-1731883663595:blk_1073741843_1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741843_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44801,DS-d72c0fef-aeef-4e18-9b7d-3699c07c5d23,DISK], DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK]) is bad. 2024-11-17T22:48:01,897 WARN [PacketResponder: BP-567024958-172.17.0.2-1731883663595:blk_1073741843_1025, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:36249] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:01,898 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:44094 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:44801:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44094 dst: /127.0.0.1:44801 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T22:48:01,898 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:35412 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:36249:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35412 dst: /127.0.0.1:36249 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:01,901 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2047cbbb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:48:01,901 WARN [regionserver/1a6e40b21a48:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44801,DS-d72c0fef-aeef-4e18-9b7d-3699c07c5d23,DISK]] 2024-11-17T22:48:01,901 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:01,901 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1a6e40b21a48%2C45593%2C1731883664234:(num 1731883679878) roll requested 2024-11-17T22:48:01,901 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2021586{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T22:48:01,901 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T22:48:01,901 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ff5703b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T22:48:01,901 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d0f4a9a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/hadoop.log.dir/,STOPPED} 2024-11-17T22:48:01,902 INFO [regionserver/1a6e40b21a48:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C45593%2C1731883664234.1731883681901 2024-11-17T22:48:01,903 WARN [BP-567024958-172.17.0.2-1731883663595 heartbeating to localhost/127.0.0.1:39901 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T22:48:01,903 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T22:48:01,903 WARN [BP-567024958-172.17.0.2-1731883663595 heartbeating to localhost/127.0.0.1:39901 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-567024958-172.17.0.2-1731883663595 (Datanode Uuid 72a50233-41d5-4607-91e8-9dbeeca91ac4) service to localhost/127.0.0.1:39901 2024-11-17T22:48:01,903 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T22:48:01,903 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data5/current/BP-567024958-172.17.0.2-1731883663595 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:48:01,903 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data6/current/BP-567024958-172.17.0.2-1731883663595 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:48:01,904 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T22:48:01,906 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:01,907 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK], DatanodeInfoWithStorage[127.0.0.1:36677,DS-ec690503-8b72-4dcd-98fc-704818383b02,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]) is bad. 2024-11-17T22:48:01,907 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741844_1027 2024-11-17T22:48:01,907 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK] 2024-11-17T22:48:01,908 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:01,909 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK], DatanodeInfoWithStorage[127.0.0.1:44801,DS-d72c0fef-aeef-4e18-9b7d-3699c07c5d23,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK]) is bad. 2024-11-17T22:48:01,909 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741845_1028 2024-11-17T22:48:01,910 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK] 2024-11-17T22:48:01,912 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36677 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:01,912 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:44126 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data8]'}, localName='127.0.0.1:44801', datanodeUuid='a80cb745-d10d-47bc-83c9-3d8e4e0c1d5a', xmitsInProgress=0}:Exception transferring block BP-567024958-172.17.0.2-1731883663595:blk_1073741846_1029 to mirror 127.0.0.1:36677 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:01,913 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44801,DS-d72c0fef-aeef-4e18-9b7d-3699c07c5d23,DISK], DatanodeInfoWithStorage[127.0.0.1:36677,DS-ec690503-8b72-4dcd-98fc-704818383b02,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36677,DS-ec690503-8b72-4dcd-98fc-704818383b02,DISK]) is bad. 2024-11-17T22:48:01,913 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741846_1029 2024-11-17T22:48:01,913 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:44126 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-17T22:48:01,913 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:44126 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:44801:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44126 dst: /127.0.0.1:44801 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T22:48:01,913 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36677,DS-ec690503-8b72-4dcd-98fc-704818383b02,DISK] 2024-11-17T22:48:01,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45593 {}] regionserver.HRegion(8855): Flush requested on 4ac114b5e436507fa65c786b22107866 2024-11-17T22:48:01,914 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4ac114b5e436507fa65c786b22107866 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-17T22:48:01,915 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:01,915 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK], DatanodeInfoWithStorage[127.0.0.1:44801,DS-d72c0fef-aeef-4e18-9b7d-3699c07c5d23,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK]) is bad. 
2024-11-17T22:48:01,915 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741847_1030 2024-11-17T22:48:01,916 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK] 2024-11-17T22:48:01,917 WARN [IPC Server handler 1 on default port 39901 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-17T22:48:01,917 WARN [IPC Server handler 1 on default port 39901 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-17T22:48:01,917 WARN [IPC Server handler 1 on default port 39901 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-17T22:48:01,922 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:01,922 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:01,922 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:01,922 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:01,922 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:01,923 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883679878 with entries=8, filesize=8.39 KB; new WAL /user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883681901 2024-11-17T22:48:01,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44801 is added to blk_1073741843_1026 (size=8599) 2024-11-17T22:48:01,925 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34141:34141)] 2024-11-17T22:48:01,925 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883664631 is not closed yet, will try archiving it next time 2024-11-17T22:48:01,925 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883679878 is not closed yet, 
will try archiving it next time 2024-11-17T22:48:01,935 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/.tmp/info/186c7f7191a84ec394927512beff5bb5 is 1080, key is row0002/info:/1731883677888/Put/seqid=0 2024-11-17T22:48:01,937 WARN [Thread-932 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:01,937 WARN [Thread-932 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK], DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK]) is bad. 2024-11-17T22:48:01,937 WARN [Thread-932 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741849_1032 2024-11-17T22:48:01,938 WARN [Thread-932 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK] 2024-11-17T22:48:01,939 WARN [Thread-932 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
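The flushes interleaved with these failures are only a few kilobytes (dataSize=7.36 KB here), far below the 128 MB default memstore flush threshold, which suggests the test runs with a much smaller hbase.hregion.memstore.flush.size so that flushing and log rolling happen quickly. A sketch of how such a threshold would be set; the 8192-byte value is an assumption for illustration, not something read from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallFlushThreshold {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Default is 128 MB; a tiny value forces a flush after a handful of ~1 KB puts,
        // which matches the ~7-10 KB "Flushing ... 1/1 column families" entries above.
        // 8192 is an assumed test value, not taken from this log.
        conf.setLong("hbase.hregion.memstore.flush.size", 8192L);
        System.out.println(conf.getLong("hbase.hregion.memstore.flush.size", -1));
      }
    }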
2024-11-17T22:48:01,939 WARN [Thread-932 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK], DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK]) is bad. 2024-11-17T22:48:01,939 WARN [Thread-932 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741850_1033 2024-11-17T22:48:01,939 WARN [Thread-932 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK] 2024-11-17T22:48:01,941 WARN [Thread-932 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:01,941 WARN [Thread-932 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36677,DS-ec690503-8b72-4dcd-98fc-704818383b02,DISK], DatanodeInfoWithStorage[127.0.0.1:44801,DS-d72c0fef-aeef-4e18-9b7d-3699c07c5d23,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36677,DS-ec690503-8b72-4dcd-98fc-704818383b02,DISK]) is bad. 2024-11-17T22:48:01,941 WARN [Thread-932 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741851_1034 2024-11-17T22:48:01,941 WARN [Thread-932 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36677,DS-ec690503-8b72-4dcd-98fc-704818383b02,DISK] 2024-11-17T22:48:01,943 WARN [Thread-932 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34923 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T22:48:01,943 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:44134 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741852_1035] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data8]'}, localName='127.0.0.1:44801', datanodeUuid='a80cb745-d10d-47bc-83c9-3d8e4e0c1d5a', xmitsInProgress=0}:Exception transferring block BP-567024958-172.17.0.2-1731883663595:blk_1073741852_1035 to mirror 127.0.0.1:34923 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:01,943 WARN [Thread-932 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44801,DS-d72c0fef-aeef-4e18-9b7d-3699c07c5d23,DISK], DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]) is bad. 2024-11-17T22:48:01,943 WARN [Thread-932 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741852_1035 2024-11-17T22:48:01,943 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:44134 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741852_1035] {}] datanode.BlockReceiver(316): Block 1073741852 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-17T22:48:01,944 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:44134 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741852_1035] {}] datanode.DataXceiver(331): 127.0.0.1:44801:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44134 dst: /127.0.0.1:44801 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:01,944 WARN [Thread-932 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK] 2024-11-17T22:48:01,945 WARN [IPC Server handler 4 on default port 39901 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-17T22:48:01,945 WARN [IPC Server handler 4 on default port 39901 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-17T22:48:01,945 WARN [IPC Server handler 4 on default port 39901 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-17T22:48:01,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44801 is added to blk_1073741853_1036 (size=10347) 2024-11-17T22:48:02,317 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
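The IPC-server WARNs show the namenode side of the same outage: BlockPlacementPolicyDefault cannot find a second live DISK storage for replication=2, and the writer is eventually left with the "All datanodes ... are bad. Aborting..." failure once pipeline recovery runs out of candidates. On the client, how aggressively a failed datanode must be replaced is governed by the dfs.client.block.write.replace-datanode-on-failure.* settings; the sketch below only shows those knobs with illustrative values and is not the configuration this test uses:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class PipelinePolicySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // With replication=2 and most datanodes dead, strict datanode replacement cannot
        // succeed, which is what produces "Failed to place enough replicas, still in need
        // of 1 to reach 2". best-effort lets the writer continue on a shrunken pipeline
        // instead of aborting the stream.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:39901"), conf);
        System.out.println("connected to " + fs.getUri());
      }
    }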
2024-11-17T22:48:02,327 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883664631 is not closed yet, will try archiving it next time 2024-11-17T22:48:02,350 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/.tmp/info/186c7f7191a84ec394927512beff5bb5 2024-11-17T22:48:02,365 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/.tmp/info/186c7f7191a84ec394927512beff5bb5 as hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/186c7f7191a84ec394927512beff5bb5 2024-11-17T22:48:02,373 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/186c7f7191a84ec394927512beff5bb5, entries=5, sequenceid=11, filesize=10.1 K 2024-11-17T22:48:02,374 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 4ac114b5e436507fa65c786b22107866 in 460ms, sequenceid=11, compaction requested=false 2024-11-17T22:48:02,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4ac114b5e436507fa65c786b22107866: 2024-11-17T22:48:02,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45593 {}] regionserver.HRegion(8855): Flush requested on 4ac114b5e436507fa65c786b22107866 2024-11-17T22:48:02,544 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4ac114b5e436507fa65c786b22107866 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-17T22:48:02,553 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/.tmp/info/96cc7c11127b4be39ef14e62a3ee560b is 1080, key is row0007/info:/1731883681916/Put/seqid=0 2024-11-17T22:48:02,557 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36249 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:02,557 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:44160 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741854_1037] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data8]'}, localName='127.0.0.1:44801', datanodeUuid='a80cb745-d10d-47bc-83c9-3d8e4e0c1d5a', xmitsInProgress=0}:Exception transferring block BP-567024958-172.17.0.2-1731883663595:blk_1073741854_1037 to mirror 127.0.0.1:36249 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:02,557 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44801,DS-d72c0fef-aeef-4e18-9b7d-3699c07c5d23,DISK], DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK]) is bad. 2024-11-17T22:48:02,558 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741854_1037 2024-11-17T22:48:02,558 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:44160 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741854_1037] {}] datanode.BlockReceiver(316): Block 1073741854 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-17T22:48:02,558 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:44160 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741854_1037] {}] datanode.DataXceiver(331): 127.0.0.1:44801:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44160 dst: /127.0.0.1:44801 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:02,558 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK] 2024-11-17T22:48:02,559 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:02,560 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK], DatanodeInfoWithStorage[127.0.0.1:44801,DS-d72c0fef-aeef-4e18-9b7d-3699c07c5d23,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]) is bad. 2024-11-17T22:48:02,560 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741855_1038 2024-11-17T22:48:02,560 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK] 2024-11-17T22:48:02,562 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42011 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
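Despite the pipeline churn, the flush begun at 22:48:01,914 completed at 22:48:02,350-02,374 above: the new HFile 186c7f7191a84ec394927512beff5bb5 was written under the region's .tmp directory and then "Committing ... as ..." moved it into the info family directory, so readers only ever observe whole store files. A sketch of that write-then-rename pattern on HDFS; the paths are placeholders echoing the log's layout, and this illustrates the pattern rather than the actual HRegionFileSystem code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpThenCommitSketch {
      public static void main(String[] args) throws Exception {
        // Whatever fs.defaultFS points at (hdfs://localhost:39901 in this test run).
        FileSystem fs = FileSystem.get(new Configuration());
        Path tmp = new Path("/region/.tmp/info/186c7f7191a84ec394927512beff5bb5"); // staging location
        Path dst = new Path("/region/info/186c7f7191a84ec394927512beff5bb5");      // visible store file

        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(new byte[10 * 1024]);  // stand-in for the flushed HFile contents
        }
        fs.mkdirs(dst.getParent());
        // A single metadata operation: readers either see the old file set or the new one.
        if (!fs.rename(tmp, dst)) {
          throw new java.io.IOException("commit failed for " + tmp);
        }
      }
    }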
2024-11-17T22:48:02,562 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:44172 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741856_1039] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data8]'}, localName='127.0.0.1:44801', datanodeUuid='a80cb745-d10d-47bc-83c9-3d8e4e0c1d5a', xmitsInProgress=0}:Exception transferring block BP-567024958-172.17.0.2-1731883663595:blk_1073741856_1039 to mirror 127.0.0.1:42011 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:02,563 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44801,DS-d72c0fef-aeef-4e18-9b7d-3699c07c5d23,DISK], DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK]) is bad. 2024-11-17T22:48:02,563 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741856_1039 2024-11-17T22:48:02,563 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:44172 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741856_1039] {}] datanode.BlockReceiver(316): Block 1073741856 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-17T22:48:02,563 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:44172 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741856_1039] {}] datanode.DataXceiver(331): 127.0.0.1:44801:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44172 dst: /127.0.0.1:44801 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:02,563 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK] 2024-11-17T22:48:02,566 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36677 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:02,566 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:44184 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741857_1040] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data8]'}, localName='127.0.0.1:44801', datanodeUuid='a80cb745-d10d-47bc-83c9-3d8e4e0c1d5a', xmitsInProgress=0}:Exception transferring block BP-567024958-172.17.0.2-1731883663595:blk_1073741857_1040 to mirror 127.0.0.1:36677 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
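The "ack with firstBadLink as 127.0.0.1:..." errors are reported by the first datanode in the pipeline: it accepted the block but could not reach its downstream mirror, and the address it reports lets the client blame the unreachable mirror ("datanode 1 ... is bad") rather than the node it is directly connected to. A hypothetical sketch of that decision; the names and the fallback rule here are mine, not the actual DataStreamer logic:

    import java.util.Arrays;
    import java.util.List;

    public class FirstBadLinkSketch {
      // Hypothetical stand-in for pipeline-ack handling: given the pipeline and the
      // firstBadLink address reported by the upstream datanode, pick the node to exclude.
      static int nodeToExclude(List<String> pipeline, String firstBadLink) {
        int i = pipeline.indexOf(firstBadLink);
        // If the reported address is in the pipeline, that node is the bad one; otherwise
        // fall back to the first node, since the error came from its connection attempt.
        return i >= 0 ? i : 0;
      }

      public static void main(String[] args) {
        List<String> pipeline = Arrays.asList("127.0.0.1:44801", "127.0.0.1:34923");
        System.out.println("exclude datanode " + nodeToExclude(pipeline, "127.0.0.1:34923"));
        // prints "exclude datanode 1", matching "datanode 1(...34923...) is bad" above
      }
    }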
2024-11-17T22:48:02,566 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44801,DS-d72c0fef-aeef-4e18-9b7d-3699c07c5d23,DISK], DatanodeInfoWithStorage[127.0.0.1:36677,DS-ec690503-8b72-4dcd-98fc-704818383b02,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36677,DS-ec690503-8b72-4dcd-98fc-704818383b02,DISK]) is bad. 2024-11-17T22:48:02,566 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741857_1040 2024-11-17T22:48:02,566 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:44184 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741857_1040] {}] datanode.BlockReceiver(316): Block 1073741857 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-17T22:48:02,566 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:44184 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741857_1040] {}] datanode.DataXceiver(331): 127.0.0.1:44801:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44184 dst: /127.0.0.1:44801 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T22:48:02,567 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36677,DS-ec690503-8b72-4dcd-98fc-704818383b02,DISK] 2024-11-17T22:48:02,567 WARN [IPC Server handler 0 on default port 39901 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-17T22:48:02,568 WARN [IPC Server handler 0 on default port 39901 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-17T22:48:02,568 WARN [IPC Server handler 0 on default port 39901 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-17T22:48:02,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44801 is added to blk_1073741858_1041 (size=12506) 2024-11-17T22:48:02,972 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/.tmp/info/96cc7c11127b4be39ef14e62a3ee560b 2024-11-17T22:48:02,980 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/.tmp/info/96cc7c11127b4be39ef14e62a3ee560b as hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/96cc7c11127b4be39ef14e62a3ee560b 2024-11-17T22:48:02,988 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/96cc7c11127b4be39ef14e62a3ee560b, entries=7, sequenceid=24, filesize=12.2 K 2024-11-17T22:48:02,989 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 4ac114b5e436507fa65c786b22107866 in 445ms, sequenceid=24, compaction requested=false 2024-11-17T22:48:02,989 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4ac114b5e436507fa65c786b22107866: 2024-11-17T22:48:02,989 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should 
split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-17T22:48:02,989 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T22:48:02,989 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/96cc7c11127b4be39ef14e62a3ee560b because midkey is the same as first or last row 2024-11-17T22:48:03,252 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:03,926 WARN [regionserver/1a6e40b21a48:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44801,DS-d72c0fef-aeef-4e18-9b7d-3699c07c5d23,DISK]] 2024-11-17T22:48:03,926 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:03,926 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1a6e40b21a48%2C45593%2C1731883664234:(num 1731883681901) roll requested 2024-11-17T22:48:03,927 INFO [regionserver/1a6e40b21a48:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C45593%2C1731883664234.1731883683926 2024-11-17T22:48:03,930 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:03,930 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK], DatanodeInfoWithStorage[127.0.0.1:36677,DS-ec690503-8b72-4dcd-98fc-704818383b02,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK]) is bad. 2024-11-17T22:48:03,930 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741859_1042 2024-11-17T22:48:03,931 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK] 2024-11-17T22:48:03,932 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:03,933 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK], DatanodeInfoWithStorage[127.0.0.1:44801,DS-d72c0fef-aeef-4e18-9b7d-3699c07c5d23,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK]) is bad. 2024-11-17T22:48:03,933 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741860_1043 2024-11-17T22:48:03,933 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK] 2024-11-17T22:48:03,935 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:03,935 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK], DatanodeInfoWithStorage[127.0.0.1:36677,DS-ec690503-8b72-4dcd-98fc-704818383b02,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]) is bad. 2024-11-17T22:48:03,935 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741861_1044 2024-11-17T22:48:03,936 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK] 2024-11-17T22:48:03,938 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36677 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:03,938 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:44204 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741862_1045] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data8]'}, localName='127.0.0.1:44801', datanodeUuid='a80cb745-d10d-47bc-83c9-3d8e4e0c1d5a', xmitsInProgress=0}:Exception transferring block BP-567024958-172.17.0.2-1731883663595:blk_1073741862_1045 to mirror 127.0.0.1:36677 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:03,938 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44801,DS-d72c0fef-aeef-4e18-9b7d-3699c07c5d23,DISK], DatanodeInfoWithStorage[127.0.0.1:36677,DS-ec690503-8b72-4dcd-98fc-704818383b02,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36677,DS-ec690503-8b72-4dcd-98fc-704818383b02,DISK]) is bad. 2024-11-17T22:48:03,938 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741862_1045 2024-11-17T22:48:03,938 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:44204 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741862_1045] {}] datanode.BlockReceiver(316): Block 1073741862 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-17T22:48:03,939 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:44204 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741862_1045] {}] datanode.DataXceiver(331): 127.0.0.1:44801:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44204 dst: /127.0.0.1:44801 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T22:48:03,939 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36677,DS-ec690503-8b72-4dcd-98fc-704818383b02,DISK] 2024-11-17T22:48:03,940 WARN [IPC Server handler 2 on default port 39901 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-17T22:48:03,940 WARN [IPC Server handler 2 on default port 39901 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-17T22:48:03,940 WARN [IPC Server handler 2 on default port 39901 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-17T22:48:03,942 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:03,942 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:03,943 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:03,943 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:03,943 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:03,943 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883681901 with entries=16, filesize=15.93 KB; new WAL /user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883683926 2024-11-17T22:48:03,944 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34141:34141)] 2024-11-17T22:48:03,944 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883664631 is not closed yet, will try archiving it next time 2024-11-17T22:48:03,944 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883681901 is not closed yet, will try archiving it next time 2024-11-17T22:48:03,944 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883675856 to 
hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/oldWALs/1a6e40b21a48%2C45593%2C1731883664234.1731883675856 2024-11-17T22:48:03,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44801 is added to blk_1073741848_1031 (size=16317) 2024-11-17T22:48:03,946 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883679878 to hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/oldWALs/1a6e40b21a48%2C45593%2C1731883664234.1731883679878 2024-11-17T22:48:03,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45593 {}] regionserver.HRegion(8855): Flush requested on 4ac114b5e436507fa65c786b22107866 2024-11-17T22:48:03,971 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4ac114b5e436507fa65c786b22107866 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-17T22:48:03,976 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/.tmp/info/7963da384c50415aa7a16afc15ee8006 is 1079, key is tmprow/info:/1731883683970/Put/seqid=0 2024-11-17T22:48:03,977 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:03,978 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK], DatanodeInfoWithStorage[127.0.0.1:44801,DS-d72c0fef-aeef-4e18-9b7d-3699c07c5d23,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK]) is bad. 
2024-11-17T22:48:03,978 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741864_1047 2024-11-17T22:48:03,978 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK] 2024-11-17T22:48:03,980 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36677 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:03,980 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:44224 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741865_1048] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data8]'}, localName='127.0.0.1:44801', datanodeUuid='a80cb745-d10d-47bc-83c9-3d8e4e0c1d5a', xmitsInProgress=0}:Exception transferring block BP-567024958-172.17.0.2-1731883663595:blk_1073741865_1048 to mirror 127.0.0.1:36677 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:03,980 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44801,DS-d72c0fef-aeef-4e18-9b7d-3699c07c5d23,DISK], DatanodeInfoWithStorage[127.0.0.1:36677,DS-ec690503-8b72-4dcd-98fc-704818383b02,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36677,DS-ec690503-8b72-4dcd-98fc-704818383b02,DISK]) is bad. 
2024-11-17T22:48:03,980 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741865_1048 2024-11-17T22:48:03,980 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:44224 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741865_1048] {}] datanode.BlockReceiver(316): Block 1073741865 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-17T22:48:03,980 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:44224 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741865_1048] {}] datanode.DataXceiver(331): 127.0.0.1:44801:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44224 dst: /127.0.0.1:44801 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:03,981 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36677,DS-ec690503-8b72-4dcd-98fc-704818383b02,DISK] 2024-11-17T22:48:03,982 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:03,982 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK], DatanodeInfoWithStorage[127.0.0.1:44801,DS-d72c0fef-aeef-4e18-9b7d-3699c07c5d23,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]) is bad. 
2024-11-17T22:48:03,982 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741866_1049 2024-11-17T22:48:03,983 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK] 2024-11-17T22:48:03,985 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36249 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:03,985 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:44240 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741867_1050] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data8]'}, localName='127.0.0.1:44801', datanodeUuid='a80cb745-d10d-47bc-83c9-3d8e4e0c1d5a', xmitsInProgress=0}:Exception transferring block BP-567024958-172.17.0.2-1731883663595:blk_1073741867_1050 to mirror 127.0.0.1:36249 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:03,985 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44801,DS-d72c0fef-aeef-4e18-9b7d-3699c07c5d23,DISK], DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK]) is bad. 
2024-11-17T22:48:03,985 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741867_1050 2024-11-17T22:48:03,985 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:44240 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741867_1050] {}] datanode.BlockReceiver(316): Block 1073741867 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-17T22:48:03,985 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:44240 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741867_1050] {}] datanode.DataXceiver(331): 127.0.0.1:44801:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44240 dst: /127.0.0.1:44801 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T22:48:03,985 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK] 2024-11-17T22:48:03,986 WARN [IPC Server handler 1 on default port 39901 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-17T22:48:03,986 WARN [IPC Server handler 1 on default port 39901 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-17T22:48:03,986 WARN [IPC Server handler 1 on default port 39901 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-17T22:48:03,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44801 is added to blk_1073741868_1051 (size=6027) 2024-11-17T22:48:04,318 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T22:48:04,346 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883664631 is not closed yet, will try archiving it next time 2024-11-17T22:48:04,390 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/.tmp/info/7963da384c50415aa7a16afc15ee8006 2024-11-17T22:48:04,399 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/.tmp/info/7963da384c50415aa7a16afc15ee8006 as hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/7963da384c50415aa7a16afc15ee8006 2024-11-17T22:48:04,407 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/7963da384c50415aa7a16afc15ee8006, entries=1, sequenceid=34, filesize=5.9 K 2024-11-17T22:48:04,408 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 4ac114b5e436507fa65c786b22107866 in 437ms, sequenceid=34, compaction requested=true 2024-11-17T22:48:04,408 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4ac114b5e436507fa65c786b22107866: 2024-11-17T22:48:04,408 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-17T22:48:04,408 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T22:48:04,409 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/96cc7c11127b4be39ef14e62a3ee560b because midkey is the same as first or last row 2024-11-17T22:48:04,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4ac114b5e436507fa65c786b22107866:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T22:48:04,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T22:48:04,409 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T22:48:04,410 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T22:48:04,410 DEBUG 
[RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.HStore(1541): 4ac114b5e436507fa65c786b22107866/info is initiating minor compaction (all files) 2024-11-17T22:48:04,411 INFO [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4ac114b5e436507fa65c786b22107866/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866. 2024-11-17T22:48:04,411 INFO [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/186c7f7191a84ec394927512beff5bb5, hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/96cc7c11127b4be39ef14e62a3ee560b, hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/7963da384c50415aa7a16afc15ee8006] into tmpdir=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/.tmp, totalSize=28.2 K 2024-11-17T22:48:04,411 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] compactions.Compactor(225): Compacting 186c7f7191a84ec394927512beff5bb5, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731883677888 2024-11-17T22:48:04,412 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] compactions.Compactor(225): Compacting 96cc7c11127b4be39ef14e62a3ee560b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1731883681916 2024-11-17T22:48:04,412 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7963da384c50415aa7a16afc15ee8006, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731883683970 2024-11-17T22:48:04,425 INFO [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4ac114b5e436507fa65c786b22107866#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T22:48:04,426 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/.tmp/info/47529d08df2d421297cce7fdcd90baaf is 1080, key is row0002/info:/1731883677888/Put/seqid=0 2024-11-17T22:48:04,427 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:04,428 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK], DatanodeInfoWithStorage[127.0.0.1:44801,DS-d72c0fef-aeef-4e18-9b7d-3699c07c5d23,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]) is bad. 2024-11-17T22:48:04,428 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741869_1052 2024-11-17T22:48:04,428 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK] 2024-11-17T22:48:04,430 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:04,430 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK], DatanodeInfoWithStorage[127.0.0.1:36677,DS-ec690503-8b72-4dcd-98fc-704818383b02,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK]) is bad. 2024-11-17T22:48:04,430 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741870_1053 2024-11-17T22:48:04,431 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK] 2024-11-17T22:48:04,432 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:04,433 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK], DatanodeInfoWithStorage[127.0.0.1:36677,DS-ec690503-8b72-4dcd-98fc-704818383b02,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK]) is bad. 2024-11-17T22:48:04,433 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741871_1054 2024-11-17T22:48:04,433 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK] 2024-11-17T22:48:04,434 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:04,435 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36677,DS-ec690503-8b72-4dcd-98fc-704818383b02,DISK], DatanodeInfoWithStorage[127.0.0.1:44801,DS-d72c0fef-aeef-4e18-9b7d-3699c07c5d23,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36677,DS-ec690503-8b72-4dcd-98fc-704818383b02,DISK]) is bad. 
2024-11-17T22:48:04,435 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741872_1055 2024-11-17T22:48:04,435 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36677,DS-ec690503-8b72-4dcd-98fc-704818383b02,DISK] 2024-11-17T22:48:04,436 WARN [IPC Server handler 3 on default port 39901 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-17T22:48:04,436 WARN [IPC Server handler 3 on default port 39901 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-17T22:48:04,436 WARN [IPC Server handler 3 on default port 39901 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-17T22:48:04,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44801 is added to blk_1073741873_1056 (size=17994) 2024-11-17T22:48:04,850 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/.tmp/info/47529d08df2d421297cce7fdcd90baaf as hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/47529d08df2d421297cce7fdcd90baaf 2024-11-17T22:48:04,859 INFO [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4ac114b5e436507fa65c786b22107866/info of 4ac114b5e436507fa65c786b22107866 into 47529d08df2d421297cce7fdcd90baaf(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T22:48:04,859 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4ac114b5e436507fa65c786b22107866: 2024-11-17T22:48:04,859 INFO [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866., storeName=4ac114b5e436507fa65c786b22107866/info, priority=13, startTime=1731883684409; duration=0sec 2024-11-17T22:48:04,859 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-17T22:48:04,859 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T22:48:04,859 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/47529d08df2d421297cce7fdcd90baaf because midkey is the same as first or last row 2024-11-17T22:48:04,859 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-17T22:48:04,859 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T22:48:04,859 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/47529d08df2d421297cce7fdcd90baaf because midkey is the same as first or last row 2024-11-17T22:48:04,859 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-17T22:48:04,859 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T22:48:04,860 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/47529d08df2d421297cce7fdcd90baaf because midkey is the same as first or last row 2024-11-17T22:48:04,860 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T22:48:04,860 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4ac114b5e436507fa65c786b22107866:info 2024-11-17T22:48:05,253 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:05,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45593 {}] regionserver.HRegion(8855): Flush requested on 4ac114b5e436507fa65c786b22107866 2024-11-17T22:48:05,398 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4ac114b5e436507fa65c786b22107866 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-17T22:48:05,402 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/.tmp/info/ea3fa9e0b4a842d5be7087ced117c99b is 1079, key is tmprow/info:/1731883685396/Put/seqid=0 2024-11-17T22:48:05,404 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:05,404 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK], DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]) is bad. 2024-11-17T22:48:05,404 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741874_1057 2024-11-17T22:48:05,405 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK] 2024-11-17T22:48:05,406 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:05,406 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK], DatanodeInfoWithStorage[127.0.0.1:44801,DS-d72c0fef-aeef-4e18-9b7d-3699c07c5d23,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK]) is bad. 2024-11-17T22:48:05,406 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741875_1058 2024-11-17T22:48:05,407 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42011,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK] 2024-11-17T22:48:05,408 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:05,408 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36677,DS-ec690503-8b72-4dcd-98fc-704818383b02,DISK], DatanodeInfoWithStorage[127.0.0.1:44801,DS-d72c0fef-aeef-4e18-9b7d-3699c07c5d23,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36677,DS-ec690503-8b72-4dcd-98fc-704818383b02,DISK]) is bad. 2024-11-17T22:48:05,408 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741876_1059 2024-11-17T22:48:05,408 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36677,DS-ec690503-8b72-4dcd-98fc-704818383b02,DISK] 2024-11-17T22:48:05,409 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:05,409 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK], DatanodeInfoWithStorage[127.0.0.1:44801,DS-d72c0fef-aeef-4e18-9b7d-3699c07c5d23,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK]) is bad. 2024-11-17T22:48:05,410 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741877_1060 2024-11-17T22:48:05,410 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK] 2024-11-17T22:48:05,411 WARN [IPC Server handler 0 on default port 39901 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-17T22:48:05,411 WARN [IPC Server handler 0 on default port 39901 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-17T22:48:05,411 WARN [IPC Server handler 0 on default port 39901 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-17T22:48:05,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44801 is added to blk_1073741878_1061 (size=6027) 2024-11-17T22:48:05,656 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@30bf5742[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44801, datanodeUuid=a80cb745-d10d-47bc-83c9-3d8e4e0c1d5a, infoPort=34141, infoSecurePort=0, ipcPort=43729, storageInfo=lv=-57;cid=testClusterID;nsid=889579875;c=1731883663595):Failed to transfer BP-567024958-172.17.0.2-1731883663595:blk_1073741843_1026 to 127.0.0.1:42011 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:05,657 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7828a44c[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44801, datanodeUuid=a80cb745-d10d-47bc-83c9-3d8e4e0c1d5a, infoPort=34141, infoSecurePort=0, ipcPort=43729, storageInfo=lv=-57;cid=testClusterID;nsid=889579875;c=1731883663595):Failed to transfer BP-567024958-172.17.0.2-1731883663595:blk_1073741853_1036 to 127.0.0.1:36677 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T22:48:05,815 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/.tmp/info/ea3fa9e0b4a842d5be7087ced117c99b 2024-11-17T22:48:05,826 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/.tmp/info/ea3fa9e0b4a842d5be7087ced117c99b as hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/ea3fa9e0b4a842d5be7087ced117c99b 2024-11-17T22:48:05,832 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/ea3fa9e0b4a842d5be7087ced117c99b, entries=1, sequenceid=45, filesize=5.9 K 2024-11-17T22:48:05,833 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 4ac114b5e436507fa65c786b22107866 in 436ms, sequenceid=45, compaction requested=false 2024-11-17T22:48:05,833 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4ac114b5e436507fa65c786b22107866: 2024-11-17T22:48:05,833 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-17T22:48:05,833 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T22:48:05,834 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/47529d08df2d421297cce7fdcd90baaf because midkey is the same as first or last row 2024-11-17T22:48:05,945 WARN [regionserver/1a6e40b21a48:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-17T22:48:05,945 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:06,015 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:48:06,020 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T22:48:06,021 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T22:48:06,021 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T22:48:06,021 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T22:48:06,022 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7d04364e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/hadoop.log.dir/,AVAILABLE} 2024-11-17T22:48:06,022 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@568b1686{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T22:48:06,117 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3740407e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/java.io.tmpdir/jetty-localhost-46855-hadoop-hdfs-3_4_1-tests_jar-_-any-15107629832670724005/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:48:06,118 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2e234cf7{HTTP/1.1, (http/1.1)}{localhost:46855} 2024-11-17T22:48:06,118 INFO [Time-limited test {}] server.Server(415): Started @127142ms 2024-11-17T22:48:06,119 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T22:48:06,192 WARN [Thread-986 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T22:48:06,200 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2543253dc3985eca with lease ID 0x94736aff417bc173: from storage DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b node DatanodeRegistration(127.0.0.1:39203, datanodeUuid=49dcf81b-409b-44ba-a903-7dd5ed0cf385, infoPort=41917, infoSecurePort=0, ipcPort=32931, storageInfo=lv=-57;cid=testClusterID;nsid=889579875;c=1731883663595), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-17T22:48:06,200 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2543253dc3985eca with lease ID 0x94736aff417bc173: from storage DS-a55a51f4-5881-4ae7-9da5-8fe38c57ec85 node DatanodeRegistration(127.0.0.1:39203, datanodeUuid=49dcf81b-409b-44ba-a903-7dd5ed0cf385, infoPort=41917, infoSecurePort=0, ipcPort=32931, storageInfo=lv=-57;cid=testClusterID;nsid=889579875;c=1731883663595), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:48:06,318 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:06,650 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@30bf5742[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44801, datanodeUuid=a80cb745-d10d-47bc-83c9-3d8e4e0c1d5a, infoPort=34141, infoSecurePort=0, ipcPort=43729, storageInfo=lv=-57;cid=testClusterID;nsid=889579875;c=1731883663595):Failed to transfer BP-567024958-172.17.0.2-1731883663595:blk_1073741848_1031 to 127.0.0.1:36249 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T22:48:06,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39203 is added to blk_1073741858_1041 (size=12506) 2024-11-17T22:48:07,253 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:07,945 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:08,319 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:08,651 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7828a44c[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44801, datanodeUuid=a80cb745-d10d-47bc-83c9-3d8e4e0c1d5a, infoPort=34141, infoSecurePort=0, ipcPort=43729, storageInfo=lv=-57;cid=testClusterID;nsid=889579875;c=1731883663595):Failed to transfer BP-567024958-172.17.0.2-1731883663595:blk_1073741868_1051 to 127.0.0.1:36249 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:08,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39203 is added to blk_1073741873_1056 (size=17994) 2024-11-17T22:48:09,254 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:09,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39203 is added to blk_1073741878_1061 (size=6027) 2024-11-17T22:48:09,946 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:10,319 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:11,255 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:11,946 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:12,320 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:13,255 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:13,946 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:14,165 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-17T22:48:14,320 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:14,418 ERROR [FSHLog-0-hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData-prefix:1a6e40b21a48,44599,1731883664183 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:14,418 WARN [FSHLog-0-hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData-prefix:1a6e40b21a48,44599,1731883664183 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
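
The FsDatasetAsyncDiskServiceFixer DEBUG line above ("NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595") is the test utility probing a Hadoop-internal field by reflection and finding it gone on newer Hadoop. A simplified illustration of that pattern follows; it is not the HBaseTestingUtil code, and the probe target here is an arbitrary object:

  // Simplified illustration of the NoSuchFieldException seen above: reflective
  // lookup of a field ("threadGroup") that may not exist on the target class.
  import java.lang.reflect.Field;

  public class ThreadGroupFieldProbe {
    static Field findThreadGroupField(Object target) {
      try {
        Field f = target.getClass().getDeclaredField("threadGroup");
        f.setAccessible(true);
        return f;
      } catch (NoSuchFieldException e) {
        // Newer Hadoop removed the field, so the fixer logs at DEBUG and
        // skips its workaround (see HBASE-27595).
        System.err.println("NoSuchFieldException: threadGroup; skipping fixer");
        return null;
      }
    }

    public static void main(String[] args) {
      // A String has no "threadGroup" field, so this takes the DEBUG path.
      Field f = findThreadGroupField("example");
      System.out.println("field found: " + (f != null));
    }
  }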
2024-11-17T22:48:14,419 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 1a6e40b21a48%2C44599%2C1731883664183:(num 1731883664335) roll requested 2024-11-17T22:48:14,420 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C44599%2C1731883664183.1731883694419 2024-11-17T22:48:14,424 WARN [Thread-1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:14,425 WARN [Thread-1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK], DatanodeInfoWithStorage[127.0.0.1:39203,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK]) is bad. 2024-11-17T22:48:14,425 WARN [Thread-1009 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741879_1062 2024-11-17T22:48:14,426 WARN [Thread-1009 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK] 2024-11-17T22:48:14,431 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:14,431 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:14,431 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:14,431 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:14,431 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:14,432 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 with entries=54, filesize=26.68 KB; new WAL /user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883694419 2024-11-17T22:48:14,432 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:14,432 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:14,432 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 2024-11-17T22:48:14,433 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34141:34141),(127.0.0.1/127.0.0.1:41917:41917)] 2024-11-17T22:48:14,433 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 is not closed yet, will try archiving it next time 2024-11-17T22:48:14,433 WARN [IPC Server handler 0 on default port 39901 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 has not been closed. Lease recovery is in progress. RecoveryId = 1064 for block blk_1073741830_1014 2024-11-17T22:48:14,433 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 after 1ms 2024-11-17T22:48:15,256 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:15,947 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:16,217 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@5a6a8cd {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-567024958-172.17.0.2-1731883663595:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:34923,null,null]) java.net.ConnectException: Call From 1a6e40b21a48/172.17.0.2 to localhost:41437 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-17T22:48:16,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39203 is added to blk_1073741833_1020 (size=455) 2024-11-17T22:48:16,904 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883664631 to hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/oldWALs/1a6e40b21a48%2C45593%2C1731883664234.1731883664631 2024-11-17T22:48:16,906 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883681901 to hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/oldWALs/1a6e40b21a48%2C45593%2C1731883664234.1731883681901 2024-11-17T22:48:17,196 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2dfcf799[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39203, datanodeUuid=49dcf81b-409b-44ba-a903-7dd5ed0cf385, infoPort=41917, infoSecurePort=0, ipcPort=32931, storageInfo=lv=-57;cid=testClusterID;nsid=889579875;c=1731883663595):Failed to transfer BP-567024958-172.17.0.2-1731883663595:blk_1073741833_1020 to 127.0.0.1:36677 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:17,256 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:17,948 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:18,436 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 after 4004ms 2024-11-17T22:48:19,257 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:19,948 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:20,196 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2dfcf799[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39203, datanodeUuid=49dcf81b-409b-44ba-a903-7dd5ed0cf385, infoPort=41917, infoSecurePort=0, ipcPort=32931, storageInfo=lv=-57;cid=testClusterID;nsid=889579875;c=1731883663595):Failed to transfer BP-567024958-172.17.0.2-1731883663595:blk_1073741835_1011 to 127.0.0.1:36249 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:20,197 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@34e96cc2[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39203, datanodeUuid=49dcf81b-409b-44ba-a903-7dd5ed0cf385, infoPort=41917, infoSecurePort=0, ipcPort=32931, storageInfo=lv=-57;cid=testClusterID;nsid=889579875;c=1731883663595):Failed to transfer BP-567024958-172.17.0.2-1731883663595:blk_1073741831_1007 to 127.0.0.1:36249 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:21,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44801 is added to blk_1073741829_1005 (size=34) 2024-11-17T22:48:21,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44801 is added to blk_1073741827_1003 (size=196) 2024-11-17T22:48:21,258 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:21,560 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C45593%2C1731883664234.1731883701560 2024-11-17T22:48:21,570 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:21,570 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:21,570 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:21,571 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:21,571 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:21,571 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883683926 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883701560 2024-11-17T22:48:21,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44801 is added to blk_1073741863_1046 (size=13591) 2024-11-17T22:48:21,573 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34141:34141),(127.0.0.1/127.0.0.1:41917:41917)] 2024-11-17T22:48:21,573 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883683926 is not closed yet, will try archiving it next time 2024-11-17T22:48:21,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45593 {}] regionserver.HRegion(8855): Flush requested on 4ac114b5e436507fa65c786b22107866 2024-11-17T22:48:21,584 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4ac114b5e436507fa65c786b22107866 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-17T22:48:21,589 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/.tmp/info/a55f482c47a3410e968658ae9601572f is 1080, key is row0013/info:/1731883701574/Put/seqid=0 2024-11-17T22:48:21,594 WARN [Thread-1026 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1066 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36249 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:21,594 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:59300 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741882_1066] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data8]'}, localName='127.0.0.1:44801', datanodeUuid='a80cb745-d10d-47bc-83c9-3d8e4e0c1d5a', xmitsInProgress=0}:Exception transferring block BP-567024958-172.17.0.2-1731883663595:blk_1073741882_1066 to mirror 127.0.0.1:36249 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:21,594 WARN [Thread-1026 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741882_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44801,DS-d72c0fef-aeef-4e18-9b7d-3699c07c5d23,DISK], DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK]) is bad. 2024-11-17T22:48:21,594 WARN [Thread-1026 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741882_1066 2024-11-17T22:48:21,594 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:59300 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741882_1066] {}] datanode.BlockReceiver(316): Block 1073741882 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-17T22:48:21,594 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:59300 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741882_1066] {}] datanode.DataXceiver(331): 127.0.0.1:44801:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59300 dst: /127.0.0.1:44801 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:21,595 WARN [Thread-1026 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK] 2024-11-17T22:48:21,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39203 is added to blk_1073741883_1067 (size=11421) 2024-11-17T22:48:21,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44801 is added to blk_1073741883_1067 (size=11421) 2024-11-17T22:48:21,602 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/.tmp/info/a55f482c47a3410e968658ae9601572f 2024-11-17T22:48:21,608 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/.tmp/info/a55f482c47a3410e968658ae9601572f as hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/a55f482c47a3410e968658ae9601572f 2024-11-17T22:48:21,614 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/a55f482c47a3410e968658ae9601572f, entries=6, sequenceid=55, filesize=11.2 K 2024-11-17T22:48:21,615 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=6.30 KB/6455 for 4ac114b5e436507fa65c786b22107866 in 31ms, sequenceid=55, compaction requested=true 2024-11-17T22:48:21,615 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4ac114b5e436507fa65c786b22107866: 2024-11-17T22:48:21,616 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.6 K, sizeToCheck=16.0 K 2024-11-17T22:48:21,616 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T22:48:21,616 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/47529d08df2d421297cce7fdcd90baaf because midkey is the same as first or last row 2024-11-17T22:48:21,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4ac114b5e436507fa65c786b22107866:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T22:48:21,616 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T22:48:21,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T22:48:21,617 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35442 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T22:48:21,617 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.HStore(1541): 4ac114b5e436507fa65c786b22107866/info is initiating minor compaction (all files) 2024-11-17T22:48:21,617 INFO [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4ac114b5e436507fa65c786b22107866/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866. 2024-11-17T22:48:21,618 INFO [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/47529d08df2d421297cce7fdcd90baaf, hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/ea3fa9e0b4a842d5be7087ced117c99b, hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/a55f482c47a3410e968658ae9601572f] into tmpdir=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/.tmp, totalSize=34.6 K 2024-11-17T22:48:21,618 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] compactions.Compactor(225): Compacting 47529d08df2d421297cce7fdcd90baaf, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731883677888 2024-11-17T22:48:21,618 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] compactions.Compactor(225): Compacting ea3fa9e0b4a842d5be7087ced117c99b, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731883685396 2024-11-17T22:48:21,619 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] compactions.Compactor(225): Compacting a55f482c47a3410e968658ae9601572f, keycount=6, bloomtype=ROW, size=11.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731883685802 2024-11-17T22:48:21,635 INFO [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4ac114b5e436507fa65c786b22107866#info#compaction#24 average throughput is 8.72 
MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T22:48:21,635 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/.tmp/info/5860c3f0dbc448048e9c9cb8344877f0 is 1080, key is row0002/info:/1731883677888/Put/seqid=0 2024-11-17T22:48:21,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39203 is added to blk_1073741884_1068 (size=23502) 2024-11-17T22:48:21,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44801 is added to blk_1073741884_1068 (size=23502) 2024-11-17T22:48:21,647 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/.tmp/info/5860c3f0dbc448048e9c9cb8344877f0 as hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/5860c3f0dbc448048e9c9cb8344877f0 2024-11-17T22:48:21,655 INFO [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4ac114b5e436507fa65c786b22107866/info of 4ac114b5e436507fa65c786b22107866 into 5860c3f0dbc448048e9c9cb8344877f0(size=23.0 K), total size for store is 23.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
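
The compaction and split-policy entries above show three store files (17.6 K + 5.9 K + 11.2 K) compacted into one 23.0 K file, after which ConstantSizeRegionSplitPolicy decides the region is "big enough" (sumSize > sizeToCheck = 16.0 K) but StoreUtils refuses to split because the midkey equals the first or last row. A simplified sketch of that size check, with thresholds taken from the log for illustration only (not HBase's actual policy classes):

  // Simplified sketch of the size-based split decision reported above:
  // sumSize vs. sizeToCheck, plus the midkey guard from StoreUtils(137).
  import java.util.Arrays;
  import java.util.List;

  public class SplitCheckSketch {
    static boolean shouldSplit(List<Long> storefileSizesBytes, long sizeToCheckBytes,
                               boolean midkeyEqualsFirstOrLastRow) {
      long sumSize = storefileSizesBytes.stream().mapToLong(Long::longValue).sum();
      if (sumSize <= sizeToCheckBytes) {
        return false;  // region not big enough yet
      }
      // Even when the size test passes, a store whose midkey equals the first
      // or last row cannot be split ("cannot split ... because midkey is the
      // same as first or last row" in the log above).
      return !midkeyEqualsFirstOrLastRow;
    }

    public static void main(String[] args) {
      // Roughly the post-compaction situation above: ~23.0 K store vs 16.0 K check.
      List<Long> sizes = Arrays.asList(23_552L);
      System.out.println(shouldSplit(sizes, 16_384L, true));   // false: midkey guard
      System.out.println(shouldSplit(sizes, 16_384L, false));  // true
    }
  }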
2024-11-17T22:48:21,655 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4ac114b5e436507fa65c786b22107866: 2024-11-17T22:48:21,655 INFO [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866., storeName=4ac114b5e436507fa65c786b22107866/info, priority=13, startTime=1731883701616; duration=0sec 2024-11-17T22:48:21,655 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-17T22:48:21,655 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T22:48:21,655 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/5860c3f0dbc448048e9c9cb8344877f0 because midkey is the same as first or last row 2024-11-17T22:48:21,655 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-17T22:48:21,655 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T22:48:21,655 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/5860c3f0dbc448048e9c9cb8344877f0 because midkey is the same as first or last row 2024-11-17T22:48:21,655 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-17T22:48:21,655 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T22:48:21,655 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/5860c3f0dbc448048e9c9cb8344877f0 because midkey is the same as first or last row 2024-11-17T22:48:21,655 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T22:48:21,655 DEBUG [RS:0;1a6e40b21a48:45593-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4ac114b5e436507fa65c786b22107866:info 2024-11-17T22:48:21,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45593 {}] regionserver.HRegion(8855): Flush requested on 4ac114b5e436507fa65c786b22107866 2024-11-17T22:48:21,801 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4ac114b5e436507fa65c786b22107866 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-17T22:48:21,808 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/.tmp/info/0e69b3b37eec417e8d1c482bb5efdb3d is 1080, key is row0018/info:/1731883701585/Put/seqid=0 2024-11-17T22:48:21,810 WARN [Thread-1042 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1069 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:21,810 WARN [Thread-1042 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741885_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK], DatanodeInfoWithStorage[127.0.0.1:39203,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK]) is bad. 
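
The "Abandoning ... / Excluding datanode ..." sequences around these flushes show the DFS client's pipeline recovery: when one pipeline node refuses the connection, the attempt is abandoned, that node is added to an exclusion set, and the write is retried with the remaining nodes until none are left. A generic illustration of that retry-with-exclusion shape (not the hadoop-hdfs-client DataStreamer itself):

  // Generic retry-with-exclusion sketch mirroring the log above: exclude the
  // node that refused the connection and retry, failing only when every node
  // has been excluded.
  import java.util.HashSet;
  import java.util.List;
  import java.util.Set;

  public class PipelineRetrySketch {
    interface Writer { void writeVia(String datanode) throws java.io.IOException; }

    static void writeWithExclusion(List<String> datanodes, Writer writer)
        throws java.io.IOException {
      Set<String> excluded = new HashSet<>();
      for (String dn : datanodes) {
        if (excluded.contains(dn)) continue;
        try {
          writer.writeVia(dn);
          return;                      // success, stop retrying
        } catch (java.io.IOException e) {
          excluded.add(dn);            // "Excluding datanode ..." in the log
        }
      }
      throw new java.io.IOException("All datanodes are bad. Aborting...");
    }

    public static void main(String[] args) throws Exception {
      writeWithExclusion(List.of("127.0.0.1:36249", "127.0.0.1:39203"),
          dn -> {
            if (dn.endsWith(":36249")) throw new java.io.IOException("Connection refused");
            System.out.println("wrote via " + dn);
          });
    }
  }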
2024-11-17T22:48:21,810 WARN [Thread-1042 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741885_1069 2024-11-17T22:48:21,811 WARN [Thread-1042 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK] 2024-11-17T22:48:21,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39203 is added to blk_1073741886_1070 (size=11421) 2024-11-17T22:48:21,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44801 is added to blk_1073741886_1070 (size=11421) 2024-11-17T22:48:21,816 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/.tmp/info/0e69b3b37eec417e8d1c482bb5efdb3d 2024-11-17T22:48:21,822 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/.tmp/info/0e69b3b37eec417e8d1c482bb5efdb3d as hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/0e69b3b37eec417e8d1c482bb5efdb3d 2024-11-17T22:48:21,828 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/0e69b3b37eec417e8d1c482bb5efdb3d, entries=6, sequenceid=66, filesize=11.2 K 2024-11-17T22:48:21,829 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 4ac114b5e436507fa65c786b22107866 in 28ms, sequenceid=66, compaction requested=false 2024-11-17T22:48:21,829 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4ac114b5e436507fa65c786b22107866: 2024-11-17T22:48:21,829 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.1 K, sizeToCheck=16.0 K 2024-11-17T22:48:21,829 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T22:48:21,829 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/5860c3f0dbc448048e9c9cb8344877f0 because midkey is the same as first or last row 2024-11-17T22:48:21,949 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-17T22:48:21,949 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:21,974 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.1731883683926 to hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/oldWALs/1a6e40b21a48%2C45593%2C1731883664234.1731883683926 2024-11-17T22:48:22,002 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-17T22:48:22,002 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-17T22:48:22,003 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at 
org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T22:48:22,003 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:48:22,003 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:48:22,003 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-17T22:48:22,003 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-17T22:48:22,003 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=924258338, stopped=false 2024-11-17T22:48:22,004 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=1a6e40b21a48,44599,1731883664183 2024-11-17T22:48:22,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43411-0x1004fdedf950002, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T22:48:22,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45593-0x1004fdedf950001, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T22:48:22,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43411-0x1004fdedf950002, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:48:22,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44599-0x1004fdedf950000, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T22:48:22,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45593-0x1004fdedf950001, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:48:22,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44599-0x1004fdedf950000, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:48:22,005 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T22:48:22,005 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
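Editor's note: the NodeDeleted events on /hbase/running just below are how each server learns the cluster is shutting down, and the later "Set watcher on znode that does not yet exist" lines are the watch being re-armed. A hedged sketch of that underlying pattern using the raw ZooKeeper client API rather than HBase's ZKWatcher (the quorum string is a placeholder):

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Sketch of watching /hbase/running with the plain ZooKeeper client.
// Not HBase's ZKWatcher; connection details are placeholders.
public class RunningNodeWatcher implements Watcher {
    private final ZooKeeper zk;
    private final String path = "/hbase/running";

    RunningNodeWatcher(String quorum) throws Exception {
        this.zk = new ZooKeeper(quorum, 30_000, this);
        // exists() both checks the node and sets a one-shot watch on it,
        // which is why the watcher has to be re-set after each event.
        zk.exists(path, this);
    }

    @Override
    public void process(WatchedEvent event) {
        if (event.getType() == Watcher.Event.EventType.NodeDeleted
                && path.equals(event.getPath())) {
            System.out.println("Cluster shutdown requested: " + path + " deleted");
        }
        try {
            zk.exists(path, this); // re-arm the one-shot watch
        } catch (Exception ignored) {
        }
    }
}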
2024-11-17T22:48:22,006 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T22:48:22,006 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:48:22,006 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '1a6e40b21a48,45593,1731883664234' ***** 2024-11-17T22:48:22,006 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-17T22:48:22,006 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '1a6e40b21a48,43411,1731883665200' ***** 2024-11-17T22:48:22,006 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-17T22:48:22,006 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44599-0x1004fdedf950000, quorum=127.0.0.1:51766, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T22:48:22,007 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43411-0x1004fdedf950002, quorum=127.0.0.1:51766, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T22:48:22,007 INFO [RS:1;1a6e40b21a48:43411 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-17T22:48:22,007 INFO [RS:0;1a6e40b21a48:45593 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-17T22:48:22,007 INFO [RS:1;1a6e40b21a48:43411 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-17T22:48:22,007 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45593-0x1004fdedf950001, quorum=127.0.0.1:51766, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T22:48:22,007 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-17T22:48:22,007 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-17T22:48:22,007 INFO [RS:1;1a6e40b21a48:43411 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-17T22:48:22,007 INFO [RS:0;1a6e40b21a48:45593 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-17T22:48:22,007 INFO [RS:1;1a6e40b21a48:43411 {}] regionserver.HRegionServer(959): stopping server 1a6e40b21a48,43411,1731883665200 2024-11-17T22:48:22,007 INFO [RS:0;1a6e40b21a48:45593 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-17T22:48:22,007 INFO [RS:1;1a6e40b21a48:43411 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T22:48:22,007 INFO [RS:0;1a6e40b21a48:45593 {}] regionserver.HRegionServer(3091): Received CLOSE for 4ac114b5e436507fa65c786b22107866 2024-11-17T22:48:22,007 INFO [RS:1;1a6e40b21a48:43411 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;1a6e40b21a48:43411. 
2024-11-17T22:48:22,008 DEBUG [RS:1;1a6e40b21a48:43411 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T22:48:22,008 DEBUG [RS:1;1a6e40b21a48:43411 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:48:22,008 INFO [RS:0;1a6e40b21a48:45593 {}] regionserver.HRegionServer(959): stopping server 1a6e40b21a48,45593,1731883664234 2024-11-17T22:48:22,008 INFO [RS:0;1a6e40b21a48:45593 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T22:48:22,008 INFO [RS:1;1a6e40b21a48:43411 {}] regionserver.HRegionServer(976): stopping server 1a6e40b21a48,43411,1731883665200; all regions closed. 2024-11-17T22:48:22,008 INFO [RS:0;1a6e40b21a48:45593 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;1a6e40b21a48:45593. 
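Editor's note: at this point RS:1 reports "all regions closed" immediately, while RS:0 still holds hbase:meta and the test region and, a few lines below, logs "Waiting on 2 regions to close" against its Online Regions map. A minimal sketch of that drain pattern under assumptions (the Region interface and map here are hypothetical stand-ins, not HBase's own types):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical stand-in for the "Online Regions={...}" map in the log:
// shutdown asks each region to close, then polls until the map is empty.
public class RegionDrain {
    interface Region { void requestClose(); boolean isClosed(); }

    private final Map<String, Region> onlineRegions = new ConcurrentHashMap<>();

    void waitForRegionsToClose(long pollMillis) throws InterruptedException {
        onlineRegions.values().forEach(Region::requestClose);
        while (!onlineRegions.isEmpty()) {
            System.out.println("Waiting on " + onlineRegions.size() + " regions to close: "
                + onlineRegions.keySet());
            onlineRegions.entrySet().removeIf(e -> e.getValue().isClosed());
            Thread.sleep(pollMillis);
        }
    }
}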
2024-11-17T22:48:22,008 DEBUG [RS:0;1a6e40b21a48:45593 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T22:48:22,008 DEBUG [RS:0;1a6e40b21a48:45593 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:48:22,008 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 4ac114b5e436507fa65c786b22107866, disabling compactions & flushes 2024-11-17T22:48:22,008 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:22,008 INFO [RS:0;1a6e40b21a48:45593 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-17T22:48:22,008 INFO [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866. 2024-11-17T22:48:22,008 INFO [RS:0;1a6e40b21a48:45593 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-17T22:48:22,008 INFO [RS:0;1a6e40b21a48:45593 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-17T22:48:22,008 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:22,009 INFO [RS:0;1a6e40b21a48:45593 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-17T22:48:22,009 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:22,009 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:22,009 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866. 2024-11-17T22:48:22,009 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:22,009 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866. 
after waiting 0 ms 2024-11-17T22:48:22,009 INFO [RS:0;1a6e40b21a48:45593 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-17T22:48:22,009 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866. 2024-11-17T22:48:22,009 DEBUG [RS:0;1a6e40b21a48:45593 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 4ac114b5e436507fa65c786b22107866=TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866.} 2024-11-17T22:48:22,009 DEBUG [RS:0;1a6e40b21a48:45593 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 4ac114b5e436507fa65c786b22107866 2024-11-17T22:48:22,009 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T22:48:22,009 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T22:48:22,009 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T22:48:22,009 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T22:48:22,009 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
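Editor's note: "Failed to write trailer, non-fatal, continuing..." above shows the WAL writer close becoming best-effort once every datanode in the pipeline is gone: the trailer write throws, the exception is logged, and shutdown proceeds so lease recovery can finalize the file later. A small sketch of that best-effort close idiom (WalWriter is a made-up placeholder interface, not HBase's AbstractProtobufLogWriter):

import java.io.IOException;

// Illustrative best-effort close: a trailer-write failure is downgraded to a
// warning because the file can still be finalized by lease recovery.
public class BestEffortClose {
    interface WalWriter { void writeTrailer() throws IOException; void close() throws IOException; }

    static void closeQuietly(WalWriter writer) {
        try {
            writer.writeTrailer();
        } catch (IOException e) {
            System.err.println("Failed to write trailer, non-fatal, continuing... " + e);
        }
        try {
            writer.close();
        } catch (IOException e) {
            System.err.println("close old writer failed. " + e);
        }
    }
}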
2024-11-17T22:48:22,009 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/186c7f7191a84ec394927512beff5bb5, hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/96cc7c11127b4be39ef14e62a3ee560b, hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/47529d08df2d421297cce7fdcd90baaf, hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/7963da384c50415aa7a16afc15ee8006, hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/ea3fa9e0b4a842d5be7087ced117c99b, hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/a55f482c47a3410e968658ae9601572f] to archive 2024-11-17T22:48:22,010 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T22:48:22,010 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:22,010 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 2024-11-17T22:48:22,010 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-17T22:48:22,010 WARN [IPC Server handler 4 on default port 39901 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 has not been closed. Lease recovery is in progress. 
RecoveryId = 1071 for block blk_1073741837_1015 2024-11-17T22:48:22,010 ERROR [FSHLog-0-hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048-prefix:1a6e40b21a48,45593,1731883664234.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:22,010 WARN [FSHLog-0-hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048-prefix:1a6e40b21a48,45593,1731883664234.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:22,011 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1a6e40b21a48%2C45593%2C1731883664234.meta:.meta(num 1731883665049) roll requested 2024-11-17T22:48:22,011 INFO [regionserver/1a6e40b21a48:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C45593%2C1731883664234.meta.1731883702011.meta 2024-11-17T22:48:22,011 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
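Editor's note: the StoreCloser thread now moves the compacted store files out of the region's data directory into the mirrored archive/ tree, as the "Archived from FileableStoreFile" lines below show. A hedged sketch of that kind of move using only the public Hadoop FileSystem API (arguments are illustrative; the real HFileArchiver adds retries and collision handling not shown here):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Moves a store file from .../data/<ns>/<table>/<region>/<family>/ to the
// mirrored .../archive/data/... location. Simplified on purpose.
public class ArchiveStoreFile {
    public static void main(String[] args) throws IOException {
        Path storeFile  = new Path(args[0]);  // e.g. .../data/default/<table>/<region>/info/<hfile>
        Path dataDir    = new Path(args[1]);  // e.g. .../data
        Path archiveDir = new Path(args[2]);  // e.g. .../archive/data

        Configuration conf = new Configuration();
        FileSystem fs = storeFile.getFileSystem(conf);

        // Rebuild the same table/region/family layout under the archive root.
        String relative = storeFile.toUri().getPath()
            .substring(dataDir.toUri().getPath().length() + 1);
        Path target = new Path(archiveDir, relative);

        fs.mkdirs(target.getParent());
        if (!fs.rename(storeFile, target)) {
            throw new IOException("Failed to archive " + storeFile + " to " + target);
        }
        System.out.println("Archived " + storeFile + " to " + target);
    }
}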
2024-11-17T22:48:22,011 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 after 1ms 2024-11-17T22:48:22,013 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/186c7f7191a84ec394927512beff5bb5 to hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/186c7f7191a84ec394927512beff5bb5 2024-11-17T22:48:22,014 WARN [Thread-1048 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1072 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:22,014 WARN [Thread-1048 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741887_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK], DatanodeInfoWithStorage[127.0.0.1:44801,DS-d72c0fef-aeef-4e18-9b7d-3699c07c5d23,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK]) is bad. 
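Editor's note: "Failed to recover lease, attempt=0 ... after 1ms" above is the first pass of a polling loop: recoverLease() keeps returning false until the NameNode has finalized the last block from the surviving replicas. A hedged sketch of that loop against the public DistributedFileSystem API (URI, path, and timings are placeholders; HBase's RecoverLeaseFSUtils layers additional backoff on top):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Polls recoverLease until the file is closed or the deadline passes.
// Placeholder URI/path; the one-second interval is arbitrary.
public class LeaseRecovery {
    public static boolean recover(DistributedFileSystem dfs, Path file, long timeoutMillis)
            throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        int attempt = 0;
        while (System.currentTimeMillis() < deadline) {
            // true means the lease was released and the file is now closed.
            if (dfs.recoverLease(file)) {
                return true;
            }
            System.out.println("Failed to recover lease, attempt=" + attempt++ + " on file=" + file);
            Thread.sleep(1000);
        }
        return false;
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(URI.create(args[0]), conf);
        System.out.println("recovered=" + recover(dfs, new Path(args[1]), 60_000));
    }
}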
2024-11-17T22:48:22,014 WARN [Thread-1048 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741887_1072 2024-11-17T22:48:22,015 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/96cc7c11127b4be39ef14e62a3ee560b to hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/96cc7c11127b4be39ef14e62a3ee560b 2024-11-17T22:48:22,015 WARN [Thread-1048 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK] 2024-11-17T22:48:22,016 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/47529d08df2d421297cce7fdcd90baaf to hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/47529d08df2d421297cce7fdcd90baaf 2024-11-17T22:48:22,017 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/7963da384c50415aa7a16afc15ee8006 to hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/7963da384c50415aa7a16afc15ee8006 2024-11-17T22:48:22,019 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/ea3fa9e0b4a842d5be7087ced117c99b to hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/ea3fa9e0b4a842d5be7087ced117c99b 2024-11-17T22:48:22,019 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:22,019 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:22,019 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:22,019 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:22,019 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:22,020 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta with entries=8, filesize=2.33 KB; new WAL 
/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883702011.meta 2024-11-17T22:48:22,020 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:22,020 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34923,DS-cce1ce32-741a-4bc1-bf14-539b9d8b06f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T22:48:22,020 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta 2024-11-17T22:48:22,020 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/a55f482c47a3410e968658ae9601572f to hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/info/a55f482c47a3410e968658ae9601572f 2024-11-17T22:48:22,020 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34141:34141),(127.0.0.1/127.0.0.1:41917:41917)] 2024-11-17T22:48:22,020 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta is not closed yet, will try archiving it next time 2024-11-17T22:48:22,021 WARN [IPC Server handler 0 on default port 39901 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta has not been closed. Lease recovery is in progress. RecoveryId = 1074 for block blk_1073741834_1010 2024-11-17T22:48:22,021 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta after 1ms 2024-11-17T22:48:22,021 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=1a6e40b21a48:44599 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-17T22:48:22,021 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [186c7f7191a84ec394927512beff5bb5=10347, 96cc7c11127b4be39ef14e62a3ee560b=12506, 47529d08df2d421297cce7fdcd90baaf=17994, 7963da384c50415aa7a16afc15ee8006=6027, ea3fa9e0b4a842d5be7087ced117c99b=6027, a55f482c47a3410e968658ae9601572f=11421] 2024-11-17T22:48:22,024 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ac114b5e436507fa65c786b22107866/recovered.edits/69.seqid, newMaxSeqId=69, maxSeqId=1 2024-11-17T22:48:22,025 INFO [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866. 2024-11-17T22:48:22,025 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 4ac114b5e436507fa65c786b22107866: Waiting for close lock at 1731883702008Running coprocessor pre-close hooks at 1731883702008Disabling compacts and flushes for region at 1731883702008Disabling writes for close at 1731883702009 (+1 ms)Writing region close event to WAL at 1731883702021 (+12 ms)Running coprocessor post-close hooks at 1731883702025 (+4 ms)Closed at 1731883702025 2024-11-17T22:48:22,025 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866. 2024-11-17T22:48:22,037 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/hbase/meta/1588230740/.tmp/info/b912c9c302d64cc49dc6e6b7a4e07b5e is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731883665287.4ac114b5e436507fa65c786b22107866./info:regioninfo/1731883665652/Put/seqid=0 2024-11-17T22:48:22,041 WARN [Thread-1054 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1075 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36249 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
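Editor's note: the StoppedRpcClientException above is a shutdown-ordering artifact: the store closer tries to report the archived files to the master after the region server's RPC client has already been stopped, so the report is logged as a warning and dropped. A minimal sketch of treating such a report as best-effort (ReportClient and its exception are hypothetical stand-ins, not the HBase RPC classes):

import java.util.List;

// Best-effort reporting: if the shared client has been stopped as part of
// shutdown, the failure is logged and the report is simply skipped.
public class ArchivalReporter {
    static class ClientStoppedException extends RuntimeException {}

    interface ReportClient {
        boolean isStopped();
        void reportFileArchival(List<String> files) throws ClientStoppedException;
    }

    static void reportBestEffort(ReportClient client, List<String> archivedFiles) {
        if (client.isStopped()) {
            System.err.println("Failed to report archival of files: " + archivedFiles);
            return;
        }
        try {
            client.reportFileArchival(archivedFiles);
        } catch (ClientStoppedException e) {
            // Raced with shutdown; the caller logs it and may retry later.
            System.err.println("Failed to report archival of files: " + archivedFiles);
        }
    }
}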
2024-11-17T22:48:22,041 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:55016 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741889_1075] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data4]'}, localName='127.0.0.1:39203', datanodeUuid='49dcf81b-409b-44ba-a903-7dd5ed0cf385', xmitsInProgress=0}:Exception transferring block BP-567024958-172.17.0.2-1731883663595:blk_1073741889_1075 to mirror 127.0.0.1:36249 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:22,042 WARN [Thread-1054 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741889_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39203,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK], DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK]) is bad. 2024-11-17T22:48:22,042 WARN [Thread-1054 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741889_1075 2024-11-17T22:48:22,042 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:55016 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741889_1075] {}] datanode.BlockReceiver(316): Block 1073741889 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-17T22:48:22,042 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:55016 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741889_1075] {}] datanode.DataXceiver(331): 127.0.0.1:39203:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55016 dst: /127.0.0.1:39203 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:22,042 WARN [Thread-1054 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK] 2024-11-17T22:48:22,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44801 is added to blk_1073741890_1076 (size=7089) 2024-11-17T22:48:22,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39203 is added to blk_1073741890_1076 (size=7089) 2024-11-17T22:48:22,047 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/hbase/meta/1588230740/.tmp/info/b912c9c302d64cc49dc6e6b7a4e07b5e 2024-11-17T22:48:22,067 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/hbase/meta/1588230740/.tmp/ns/3ca37786caa840e19ed2c81b6f1e760d is 43, key is default/ns:d/1731883665098/Put/seqid=0 2024-11-17T22:48:22,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44801 is added to blk_1073741891_1077 (size=5153) 2024-11-17T22:48:22,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39203 is added to blk_1073741891_1077 (size=5153) 2024-11-17T22:48:22,072 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/hbase/meta/1588230740/.tmp/ns/3ca37786caa840e19ed2c81b6f1e760d 2024-11-17T22:48:22,090 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/hbase/meta/1588230740/.tmp/table/0a66061649d34618add298893ef74836 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731883665664/Put/seqid=0 2024-11-17T22:48:22,092 WARN [Thread-1068 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1078 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36249 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:22,092 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:55052 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741892_1078] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data4]'}, localName='127.0.0.1:39203', datanodeUuid='49dcf81b-409b-44ba-a903-7dd5ed0cf385', xmitsInProgress=0}:Exception transferring block BP-567024958-172.17.0.2-1731883663595:blk_1073741892_1078 to mirror 127.0.0.1:36249 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:22,093 WARN [Thread-1068 {}] hdfs.DataStreamer(1731): Error Recovery for BP-567024958-172.17.0.2-1731883663595:blk_1073741892_1078 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39203,DS-ee42e394-ae4a-48ad-b9ca-cf41e49e265b,DISK], DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK]) is bad. 2024-11-17T22:48:22,093 WARN [Thread-1068 {}] hdfs.DataStreamer(1850): Abandoning BP-567024958-172.17.0.2-1731883663595:blk_1073741892_1078 2024-11-17T22:48:22,093 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:55052 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741892_1078] {}] datanode.BlockReceiver(316): Block 1073741892 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-17T22:48:22,093 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670517417_22 at /127.0.0.1:55052 [Receiving block BP-567024958-172.17.0.2-1731883663595:blk_1073741892_1078] {}] datanode.DataXceiver(331): 127.0.0.1:39203:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55052 dst: /127.0.0.1:39203 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:22,093 WARN [Thread-1068 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36249,DS-d8092e01-1ba1-4557-b096-5bf660fc2c6a,DISK] 2024-11-17T22:48:22,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39203 is added to blk_1073741893_1079 (size=5424) 2024-11-17T22:48:22,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44801 is added to blk_1073741893_1079 (size=5424) 2024-11-17T22:48:22,098 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/hbase/meta/1588230740/.tmp/table/0a66061649d34618add298893ef74836 2024-11-17T22:48:22,106 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/hbase/meta/1588230740/.tmp/info/b912c9c302d64cc49dc6e6b7a4e07b5e as hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/hbase/meta/1588230740/info/b912c9c302d64cc49dc6e6b7a4e07b5e 2024-11-17T22:48:22,112 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/hbase/meta/1588230740/info/b912c9c302d64cc49dc6e6b7a4e07b5e, entries=10, sequenceid=11, filesize=6.9 K 2024-11-17T22:48:22,114 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/hbase/meta/1588230740/.tmp/ns/3ca37786caa840e19ed2c81b6f1e760d as hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/hbase/meta/1588230740/ns/3ca37786caa840e19ed2c81b6f1e760d 2024-11-17T22:48:22,121 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/hbase/meta/1588230740/ns/3ca37786caa840e19ed2c81b6f1e760d, entries=2, sequenceid=11, filesize=5.0 K 2024-11-17T22:48:22,122 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/hbase/meta/1588230740/.tmp/table/0a66061649d34618add298893ef74836 as hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/hbase/meta/1588230740/table/0a66061649d34618add298893ef74836 2024-11-17T22:48:22,128 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/hbase/meta/1588230740/table/0a66061649d34618add298893ef74836, entries=2, sequenceid=11, filesize=5.3 K 2024-11-17T22:48:22,129 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 119ms, sequenceid=11, compaction requested=false 2024-11-17T22:48:22,134 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-17T22:48:22,135 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T22:48:22,135 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T22:48:22,135 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731883702009Running coprocessor pre-close hooks at 1731883702009Disabling compacts and flushes for region at 1731883702009Disabling writes for close at 1731883702009Obtaining lock to block concurrent updates at 1731883702010 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731883702010Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731883702011 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731883702021 (+10 ms)Flushing 1588230740/info: creating writer at 1731883702021Flushing 1588230740/info: appending metadata at 1731883702037 (+16 ms)Flushing 1588230740/info: closing flushed file at 1731883702037Flushing 1588230740/ns: creating writer at 1731883702053 (+16 ms)Flushing 1588230740/ns: appending metadata at 1731883702066 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1731883702066Flushing 1588230740/table: creating writer at 1731883702077 (+11 ms)Flushing 1588230740/table: appending metadata at 1731883702090 (+13 ms)Flushing 1588230740/table: closing flushed file at 1731883702090Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4b09f3e5: reopening flushed file at 1731883702105 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@456d59a4: reopening flushed file at 1731883702113 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@73968848: reopening flushed file at 1731883702121 (+8 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 119ms, sequenceid=11, compaction requested=false at 
1731883702129 (+8 ms)Writing region close event to WAL at 1731883702131 (+2 ms)Running coprocessor post-close hooks at 1731883702135 (+4 ms)Closed at 1731883702135 2024-11-17T22:48:22,135 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-17T22:48:22,209 INFO [RS:0;1a6e40b21a48:45593 {}] regionserver.HRegionServer(976): stopping server 1a6e40b21a48,45593,1731883664234; all regions closed. 2024-11-17T22:48:22,210 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:22,210 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:22,210 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:22,210 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:22,210 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:22,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39203 is added to blk_1073741888_1073 (size=825) 2024-11-17T22:48:22,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44801 is added to blk_1073741888_1073 (size=825) 2024-11-17T22:48:22,323 INFO [regionserver/1a6e40b21a48:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-17T22:48:22,323 INFO [regionserver/1a6e40b21a48:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-17T22:48:22,496 INFO [regionserver/1a6e40b21a48:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T22:48:22,576 INFO [regionserver/1a6e40b21a48:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-17T22:48:22,576 INFO [regionserver/1a6e40b21a48:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-17T22:48:23,197 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@34e96cc2[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39203, datanodeUuid=49dcf81b-409b-44ba-a903-7dd5ed0cf385, infoPort=41917, infoSecurePort=0, ipcPort=32931, storageInfo=lv=-57;cid=testClusterID;nsid=889579875;c=1731883663595):Failed to transfer BP-567024958-172.17.0.2-1731883663595:blk_1073741832_1008 to 127.0.0.1:36249 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T22:48:23,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44801 is added to blk_1073741828_1004 (size=1189) 2024-11-17T22:48:23,253 INFO [regionserver/1a6e40b21a48:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T22:48:23,656 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@30bf5742[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44801, datanodeUuid=a80cb745-d10d-47bc-83c9-3d8e4e0c1d5a, infoPort=34141, infoSecurePort=0, ipcPort=43729, storageInfo=lv=-57;cid=testClusterID;nsid=889579875;c=1731883663595):Failed to transfer BP-567024958-172.17.0.2-1731883663595:blk_1073741863_1046 to 127.0.0.1:36249 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:24,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44801 is added to blk_1073741826_1002 (size=42) 2024-11-17T22:48:24,902 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-17T22:48:24,904 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T22:48:24,905 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-17T22:48:25,920 INFO [master/1a6e40b21a48:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-17T22:48:25,920 INFO [master/1a6e40b21a48:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-17T22:48:26,012 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 after 4002ms 2024-11-17T22:48:26,022 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta after 4002ms 2024-11-17T22:48:26,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44801 is added to blk_1073741825_1001 (size=7) 2024-11-17T22:48:26,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44801 is added to blk_1073741836_1012 (size=76) 2024-11-17T22:48:27,010 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-17T22:48:27,015 DEBUG [RS:1;1a6e40b21a48:43411 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/oldWALs 2024-11-17T22:48:27,015 INFO [RS:1;1a6e40b21a48:43411 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1a6e40b21a48%2C43411%2C1731883665200:(num 1731883665392) 2024-11-17T22:48:27,015 DEBUG [RS:1;1a6e40b21a48:43411 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:48:27,015 INFO [RS:1;1a6e40b21a48:43411 {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T22:48:27,015 INFO [RS:1;1a6e40b21a48:43411 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T22:48:27,015 INFO [RS:1;1a6e40b21a48:43411 {}] hbase.ChoreService(370): Chore service for: regionserver/1a6e40b21a48:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-17T22:48:27,016 INFO [RS:1;1a6e40b21a48:43411 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-17T22:48:27,016 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-17T22:48:27,016 INFO [RS:1;1a6e40b21a48:43411 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-17T22:48:27,016 INFO [RS:1;1a6e40b21a48:43411 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-17T22:48:27,016 INFO [RS:1;1a6e40b21a48:43411 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T22:48:27,017 INFO [RS:1;1a6e40b21a48:43411 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43411 2024-11-17T22:48:27,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43411-0x1004fdedf950002, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1a6e40b21a48,43411,1731883665200 2024-11-17T22:48:27,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44599-0x1004fdedf950000, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T22:48:27,020 INFO [RS:1;1a6e40b21a48:43411 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T22:48:27,021 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1a6e40b21a48,43411,1731883665200] 2024-11-17T22:48:27,022 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1a6e40b21a48,43411,1731883665200 already deleted, retry=false 2024-11-17T22:48:27,022 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1a6e40b21a48,43411,1731883665200 expired; onlineServers=1 2024-11-17T22:48:27,026 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:27,041 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:27,041 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:27,041 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:27,042 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:27,042 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:27,049 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:27,051 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:27,122 INFO [RS:1;1a6e40b21a48:43411 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T22:48:27,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43411-0x1004fdedf950002, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T22:48:27,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43411-0x1004fdedf950002, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T22:48:27,122 INFO [RS:1;1a6e40b21a48:43411 {}] regionserver.HRegionServer(1031): Exiting; stopping=1a6e40b21a48,43411,1731883665200; zookeeper connection closed. 2024-11-17T22:48:27,122 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6e1577a2 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6e1577a2 2024-11-17T22:48:27,211 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-17T22:48:27,221 DEBUG [RS:0;1a6e40b21a48:45593 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/oldWALs 2024-11-17T22:48:27,221 INFO [RS:0;1a6e40b21a48:45593 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1a6e40b21a48%2C45593%2C1731883664234.meta:.meta(num 1731883702011) 2024-11-17T22:48:27,221 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:27,222 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:27,222 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:27,222 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:27,222 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:27,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39203 is added to blk_1073741881_1065 (size=16308) 2024-11-17T22:48:27,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44801 is added to blk_1073741881_1065 (size=16308) 2024-11-17T22:48:27,226 DEBUG [RS:0;1a6e40b21a48:45593 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/oldWALs 2024-11-17T22:48:27,226 INFO [RS:0;1a6e40b21a48:45593 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1a6e40b21a48%2C45593%2C1731883664234:(num 1731883701560) 2024-11-17T22:48:27,226 DEBUG [RS:0;1a6e40b21a48:45593 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:48:27,226 INFO [RS:0;1a6e40b21a48:45593 {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T22:48:27,227 INFO [RS:0;1a6e40b21a48:45593 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T22:48:27,227 INFO [RS:0;1a6e40b21a48:45593 {}] hbase.ChoreService(370): Chore service for: regionserver/1a6e40b21a48:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-17T22:48:27,227 INFO [RS:0;1a6e40b21a48:45593 {}] 
hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T22:48:27,227 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-17T22:48:27,227 INFO [RS:0;1a6e40b21a48:45593 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45593 2024-11-17T22:48:27,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45593-0x1004fdedf950001, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1a6e40b21a48,45593,1731883664234 2024-11-17T22:48:27,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44599-0x1004fdedf950000, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T22:48:27,228 INFO [RS:0;1a6e40b21a48:45593 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T22:48:27,229 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1a6e40b21a48,45593,1731883664234] 2024-11-17T22:48:27,230 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1a6e40b21a48,45593,1731883664234 already deleted, retry=false 2024-11-17T22:48:27,230 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1a6e40b21a48,45593,1731883664234 expired; onlineServers=0 2024-11-17T22:48:27,230 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '1a6e40b21a48,44599,1731883664183' ***** 2024-11-17T22:48:27,230 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-17T22:48:27,230 INFO [M:0;1a6e40b21a48:44599 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T22:48:27,230 INFO [M:0;1a6e40b21a48:44599 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T22:48:27,230 DEBUG [M:0;1a6e40b21a48:44599 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-17T22:48:27,231 DEBUG [M:0;1a6e40b21a48:44599 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-17T22:48:27,231 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-17T22:48:27,231 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.small.0-1731883664417 {}] cleaner.HFileCleaner(306): Exit Thread[master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.small.0-1731883664417,5,FailOnTimeoutGroup] 2024-11-17T22:48:27,231 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.large.0-1731883664417 {}] cleaner.HFileCleaner(306): Exit Thread[master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.large.0-1731883664417,5,FailOnTimeoutGroup] 2024-11-17T22:48:27,231 INFO [M:0;1a6e40b21a48:44599 {}] hbase.ChoreService(370): Chore service for: master/1a6e40b21a48:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-17T22:48:27,231 INFO [M:0;1a6e40b21a48:44599 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T22:48:27,231 DEBUG [M:0;1a6e40b21a48:44599 {}] master.HMaster(1795): Stopping service threads 2024-11-17T22:48:27,231 INFO [M:0;1a6e40b21a48:44599 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-17T22:48:27,231 INFO [M:0;1a6e40b21a48:44599 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T22:48:27,231 INFO [M:0;1a6e40b21a48:44599 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-17T22:48:27,232 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-17T22:48:27,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44599-0x1004fdedf950000, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-17T22:48:27,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44599-0x1004fdedf950000, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:48:27,232 DEBUG [M:0;1a6e40b21a48:44599 {}] zookeeper.ZKUtil(347): master:44599-0x1004fdedf950000, quorum=127.0.0.1:51766, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-17T22:48:27,232 WARN [M:0;1a6e40b21a48:44599 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-17T22:48:27,233 INFO [M:0;1a6e40b21a48:44599 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/.lastflushedseqids 2024-11-17T22:48:27,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39203 is added to blk_1073741894_1080 (size=130) 2024-11-17T22:48:27,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44801 is added to blk_1073741894_1080 (size=130) 2024-11-17T22:48:27,239 INFO [M:0;1a6e40b21a48:44599 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-17T22:48:27,239 INFO [M:0;1a6e40b21a48:44599 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-17T22:48:27,239 DEBUG [M:0;1a6e40b21a48:44599 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T22:48:27,239 INFO [M:0;1a6e40b21a48:44599 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:48:27,239 DEBUG [M:0;1a6e40b21a48:44599 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:48:27,239 DEBUG [M:0;1a6e40b21a48:44599 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T22:48:27,239 DEBUG [M:0;1a6e40b21a48:44599 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:48:27,239 INFO [M:0;1a6e40b21a48:44599 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.26 KB heapSize=29.50 KB 2024-11-17T22:48:27,254 DEBUG [M:0;1a6e40b21a48:44599 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/82cada78ee364b4ca4972e9486c5d613 is 82, key is hbase:meta,,1/info:regioninfo/1731883665083/Put/seqid=0 2024-11-17T22:48:27,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44801 is added to blk_1073741895_1081 (size=5672) 2024-11-17T22:48:27,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39203 is added to blk_1073741895_1081 (size=5672) 2024-11-17T22:48:27,259 INFO [M:0;1a6e40b21a48:44599 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/82cada78ee364b4ca4972e9486c5d613 2024-11-17T22:48:27,280 DEBUG [M:0;1a6e40b21a48:44599 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3b32adfc37d845bdaac20604bc362427 is 775, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731883665670/Put/seqid=0 2024-11-17T22:48:27,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39203 is added to blk_1073741896_1082 (size=6256) 2024-11-17T22:48:27,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44801 is added to blk_1073741896_1082 (size=6256) 2024-11-17T22:48:27,285 INFO [M:0;1a6e40b21a48:44599 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.59 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3b32adfc37d845bdaac20604bc362427 2024-11-17T22:48:27,291 INFO [M:0;1a6e40b21a48:44599 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3b32adfc37d845bdaac20604bc362427 2024-11-17T22:48:27,305 DEBUG [M:0;1a6e40b21a48:44599 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2def431eac054e548dbc0c46fee5682e is 69, key is 1a6e40b21a48,43411,1731883665200/rs:state/1731883665236/Put/seqid=0 2024-11-17T22:48:27,310 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39203 is added to blk_1073741897_1083 (size=5224) 2024-11-17T22:48:27,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44801 is added to blk_1073741897_1083 (size=5224) 2024-11-17T22:48:27,310 INFO [M:0;1a6e40b21a48:44599 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2def431eac054e548dbc0c46fee5682e 2024-11-17T22:48:27,328 DEBUG [M:0;1a6e40b21a48:44599 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2ad68d0b24264794ae03c737aa3a6284 is 52, key is load_balancer_on/state:d/1731883665185/Put/seqid=0 2024-11-17T22:48:27,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45593-0x1004fdedf950001, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T22:48:27,329 INFO [RS:0;1a6e40b21a48:45593 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T22:48:27,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45593-0x1004fdedf950001, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T22:48:27,329 INFO [RS:0;1a6e40b21a48:45593 {}] regionserver.HRegionServer(1031): Exiting; stopping=1a6e40b21a48,45593,1731883664234; zookeeper connection closed. 
2024-11-17T22:48:27,330 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@52949f52 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@52949f52 2024-11-17T22:48:27,330 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-17T22:48:27,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44801 is added to blk_1073741898_1084 (size=5056) 2024-11-17T22:48:27,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39203 is added to blk_1073741898_1084 (size=5056) 2024-11-17T22:48:27,333 INFO [M:0;1a6e40b21a48:44599 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2ad68d0b24264794ae03c737aa3a6284 2024-11-17T22:48:27,338 DEBUG [M:0;1a6e40b21a48:44599 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/82cada78ee364b4ca4972e9486c5d613 as hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/82cada78ee364b4ca4972e9486c5d613 2024-11-17T22:48:27,344 INFO [M:0;1a6e40b21a48:44599 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/82cada78ee364b4ca4972e9486c5d613, entries=8, sequenceid=60, filesize=5.5 K 2024-11-17T22:48:27,345 DEBUG [M:0;1a6e40b21a48:44599 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3b32adfc37d845bdaac20604bc362427 as hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3b32adfc37d845bdaac20604bc362427 2024-11-17T22:48:27,350 INFO [M:0;1a6e40b21a48:44599 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3b32adfc37d845bdaac20604bc362427 2024-11-17T22:48:27,350 INFO [M:0;1a6e40b21a48:44599 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3b32adfc37d845bdaac20604bc362427, entries=6, sequenceid=60, filesize=6.1 K 2024-11-17T22:48:27,351 DEBUG [M:0;1a6e40b21a48:44599 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2def431eac054e548dbc0c46fee5682e as hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2def431eac054e548dbc0c46fee5682e 2024-11-17T22:48:27,356 INFO [M:0;1a6e40b21a48:44599 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2def431eac054e548dbc0c46fee5682e, entries=2, sequenceid=60, filesize=5.1 K 2024-11-17T22:48:27,357 DEBUG [M:0;1a6e40b21a48:44599 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2ad68d0b24264794ae03c737aa3a6284 as hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2ad68d0b24264794ae03c737aa3a6284 2024-11-17T22:48:27,362 INFO [M:0;1a6e40b21a48:44599 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2ad68d0b24264794ae03c737aa3a6284, entries=1, sequenceid=60, filesize=4.9 K 2024-11-17T22:48:27,364 INFO [M:0;1a6e40b21a48:44599 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 125ms, sequenceid=60, compaction requested=false 2024-11-17T22:48:27,365 INFO [M:0;1a6e40b21a48:44599 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:48:27,365 DEBUG [M:0;1a6e40b21a48:44599 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731883707239Disabling compacts and flushes for region at 1731883707239Disabling writes for close at 1731883707239Obtaining lock to block concurrent updates at 1731883707239Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731883707239Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23817, getHeapSize=30144, getOffHeapSize=0, getCellsCount=71 at 1731883707239Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731883707240 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731883707240Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731883707254 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731883707254Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731883707265 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731883707279 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731883707279Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731883707291 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731883707304 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731883707304Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731883707315 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731883707327 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731883707327Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3654c8eb: reopening flushed file at 1731883707338 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@213da61: reopening flushed file at 1731883707344 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@34e556f0: reopening flushed file at 1731883707350 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4300b548: reopening flushed file at 1731883707356 (+6 ms)Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 125ms, sequenceid=60, compaction requested=false at 1731883707364 (+8 ms)Writing region close event to WAL at 1731883707365 (+1 ms)Closed at 1731883707365 2024-11-17T22:48:27,366 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:27,366 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:27,366 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:27,366 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:27,366 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:27,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39203 is added to blk_1073741880_1063 (size=1045) 2024-11-17T22:48:27,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44801 is added to blk_1073741880_1063 (size=1045) 2024-11-17T22:48:27,553 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-17T22:48:27,574 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:27,574 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:27,575 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:27,575 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:27,575 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:27,575 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:27,579 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:27,581 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:28,014 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:48:28,023 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:29,016 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:29,024 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:48:30,018 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:30,026 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:31,019 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:48:31,027 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:32,021 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:32,029 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:48:32,203 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@55596d24 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-567024958-172.17.0.2-1731883663595:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:34923,null,null]) java.net.ConnectException: Call From 1a6e40b21a48/172.17.0.2 to localhost:41437 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-17T22:48:32,367 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-17T22:48:32,367 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-17T22:48:32,367 INFO [M:0;1a6e40b21a48:44599 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-17T22:48:32,368 INFO [M:0;1a6e40b21a48:44599 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44599 2024-11-17T22:48:32,368 INFO [M:0;1a6e40b21a48:44599 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T22:48:32,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44599-0x1004fdedf950000, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T22:48:32,471 INFO [M:0;1a6e40b21a48:44599 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T22:48:32,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44599-0x1004fdedf950000, quorum=127.0.0.1:51766, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T22:48:32,476 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3740407e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:48:32,477 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2e234cf7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T22:48:32,477 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T22:48:32,477 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@568b1686{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T22:48:32,477 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7d04364e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/hadoop.log.dir/,STOPPED} 2024-11-17T22:48:32,480 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
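(Illustration, not output of this test run.) The ERROR from wal.AbstractFSWAL just above reports that the async WAL writer did not close within the 5-second wait and names the tunable "hbase.wal.fshlog.wait.on.shutdown.seconds". A minimal Java sketch, assuming only a plain HBase client Configuration (the test wiring that would consume it is not shown in this log and is an assumption here):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalShutdownWaitSketch {
  public static void main(String[] args) {
    // Standard HBase configuration factory; picks up hbase-default.xml / hbase-site.xml if present.
    Configuration conf = HBaseConfiguration.create();
    // Key copied verbatim from the ERROR message above; the run above gave up after 5 seconds.
    // A larger value gives the Close-WAL-Writer-0 thread more time before shutdown gives up.
    conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 30);
    // 'conf' would then be handed to whatever starts the cluster under test (omitted here).
    System.out.println(conf.getInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 5));
  }
}

Note that in this particular run the close could not finish because the HDFS client was already closed ("Caused by: java.io.IOException: Filesystem closed" above), so a longer wait alone would not necessarily have helped; the sketch only shows where the knob lives.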
2024-11-17T22:48:32,480 WARN [BP-567024958-172.17.0.2-1731883663595 heartbeating to localhost/127.0.0.1:39901 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T22:48:32,480 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T22:48:32,480 WARN [BP-567024958-172.17.0.2-1731883663595 heartbeating to localhost/127.0.0.1:39901 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-567024958-172.17.0.2-1731883663595 (Datanode Uuid 49dcf81b-409b-44ba-a903-7dd5ed0cf385) service to localhost/127.0.0.1:39901 2024-11-17T22:48:32,481 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data3/current/BP-567024958-172.17.0.2-1731883663595 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:48:32,482 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data4/current/BP-567024958-172.17.0.2-1731883663595 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:48:32,482 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T22:48:32,484 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5b5be5aa{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:48:32,484 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7f7383da{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T22:48:32,484 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T22:48:32,485 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@167a7fde{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T22:48:32,485 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@519de6b2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/hadoop.log.dir/,STOPPED} 2024-11-17T22:48:32,486 WARN [BP-567024958-172.17.0.2-1731883663595 heartbeating to localhost/127.0.0.1:39901 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T22:48:32,486 WARN [BP-567024958-172.17.0.2-1731883663595 heartbeating to localhost/127.0.0.1:39901 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-567024958-172.17.0.2-1731883663595 (Datanode Uuid a80cb745-d10d-47bc-83c9-3d8e4e0c1d5a) service to localhost/127.0.0.1:39901 2024-11-17T22:48:32,486 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
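(Illustration, not output of this test run.) The repeated util.RecoverLeaseFSUtils WARNs earlier in this section call isFileClosed through reflection, and the call fails because the DFS client is already shut down. The self-contained sketch below (the ClosedFs class is invented purely for illustration and is not HBase or Hadoop code) shows why such a failure is logged as "java.lang.reflect.InvocationTargetException: null" with the real error attached as the cause:

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveProbeSketch {
  // Stand-in for a filesystem whose client has already been closed (assumption for illustration).
  public static class ClosedFs {
    public boolean isFileClosed(String path) throws IOException {
      throw new IOException("Filesystem closed");
    }
  }

  public static void main(String[] args) throws Exception {
    Method probe = ClosedFs.class.getMethod("isFileClosed", String.class);
    try {
      probe.invoke(new ClosedFs(), "/some/wal/file");
    } catch (InvocationTargetException e) {
      // Reflection wraps the real failure; the wrapper's own message is null, which is why the
      // WARNs above print "InvocationTargetException: null ... Caused by: ... Filesystem closed".
      System.out.println("cause: " + e.getCause());
    }
  }
}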
2024-11-17T22:48:32,486 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T22:48:32,487 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data7/current/BP-567024958-172.17.0.2-1731883663595 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:48:32,487 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/cluster_c9a86620-a4e3-e065-0bd4-fe457a6a2fbc/data/data8/current/BP-567024958-172.17.0.2-1731883663595 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:48:32,487 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T22:48:32,493 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@62b96b7c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T22:48:32,493 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2fd186ec{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T22:48:32,493 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T22:48:32,493 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2305029e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T22:48:32,494 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@42b52d44{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/hadoop.log.dir/,STOPPED} 2024-11-17T22:48:32,501 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-17T22:48:32,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-17T22:48:32,541 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=155 (was 78) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39901 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) 
app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007fb240bf4928.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:44627 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39901 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39901 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:39901 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:39901 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007fb240bf4928.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:39901 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39901 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) 
connection to localhost/127.0.0.1:39901 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39901 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:39901 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007fb240bf4928.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:39901 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44627 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=434 (was 408) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=87 (was 145), ProcessCount=11 (was 11), AvailableMemoryMB=4291 (was 4909) 2024-11-17T22:48:32,547 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=155, OpenFileDescriptor=434, MaxFileDescriptor=1048576, SystemLoadAverage=87, ProcessCount=11, AvailableMemoryMB=4290 2024-11-17T22:48:32,547 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-17T22:48:32,548 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/hadoop.log.dir so I do NOT create it in target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8 2024-11-17T22:48:32,548 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dbccf65c-3a7e-6f65-b738-1a050ec66edf/hadoop.tmp.dir so I do NOT create it in target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8 2024-11-17T22:48:32,548 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/cluster_4b11f703-f0de-e5d6-1f18-b51e75fa87df, deleteOnExit=true 2024-11-17T22:48:32,548 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-17T22:48:32,548 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/test.cache.data in system properties and HBase conf 2024-11-17T22:48:32,548 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/hadoop.tmp.dir in system properties and HBase conf 2024-11-17T22:48:32,548 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/hadoop.log.dir in system properties and HBase conf 2024-11-17T22:48:32,548 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-17T22:48:32,548 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-17T22:48:32,548 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-17T22:48:32,548 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-17T22:48:32,548 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-17T22:48:32,548 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-17T22:48:32,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-17T22:48:32,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T22:48:32,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-17T22:48:32,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-17T22:48:32,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T22:48:32,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T22:48:32,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-17T22:48:32,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/nfs.dump.dir in system properties and HBase conf 2024-11-17T22:48:32,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/java.io.tmpdir in system properties and HBase conf 2024-11-17T22:48:32,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T22:48:32,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-17T22:48:32,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-17T22:48:32,561 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T22:48:32,604 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:48:32,608 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T22:48:32,609 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T22:48:32,609 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T22:48:32,609 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T22:48:32,610 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:48:32,610 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1c727387{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/hadoop.log.dir/,AVAILABLE} 2024-11-17T22:48:32,611 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10e56c5a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T22:48:32,702 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7f6d993c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/java.io.tmpdir/jetty-localhost-42583-hadoop-hdfs-3_4_1-tests_jar-_-any-6720228917869392489/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T22:48:32,703 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4ea918cd{HTTP/1.1, (http/1.1)}{localhost:42583} 2024-11-17T22:48:32,703 INFO [Time-limited test {}] server.Server(415): Started @153727ms 2024-11-17T22:48:32,714 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T22:48:32,752 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:48:32,756 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T22:48:32,756 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T22:48:32,756 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T22:48:32,757 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T22:48:32,757 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@29ec962a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/hadoop.log.dir/,AVAILABLE} 2024-11-17T22:48:32,758 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@444db7a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T22:48:32,854 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@354edf1e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/java.io.tmpdir/jetty-localhost-40315-hadoop-hdfs-3_4_1-tests_jar-_-any-3489614940500985833/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:48:32,855 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2aa0e69d{HTTP/1.1, (http/1.1)}{localhost:40315} 2024-11-17T22:48:32,856 INFO [Time-limited test {}] server.Server(415): Started @153880ms 2024-11-17T22:48:32,857 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T22:48:32,892 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:48:32,895 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T22:48:32,897 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T22:48:32,897 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T22:48:32,897 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T22:48:32,898 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c7a3196{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/hadoop.log.dir/,AVAILABLE} 2024-11-17T22:48:32,898 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@23fcbb95{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T22:48:32,919 WARN [Thread-1190 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/cluster_4b11f703-f0de-e5d6-1f18-b51e75fa87df/data/data1/current/BP-1180934907-172.17.0.2-1731883712565/current, will proceed with Du for space computation calculation, 2024-11-17T22:48:32,919 WARN [Thread-1191 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/cluster_4b11f703-f0de-e5d6-1f18-b51e75fa87df/data/data2/current/BP-1180934907-172.17.0.2-1731883712565/current, will proceed with Du for space computation calculation, 2024-11-17T22:48:32,933 WARN [Thread-1169 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T22:48:32,936 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5627209b1da94e8b with lease ID 0x7fab5858edf038ee: Processing first storage report for DS-e1e33aed-3ae6-482a-8418-a6945e5581ec from datanode DatanodeRegistration(127.0.0.1:46613, datanodeUuid=a31c329b-8c82-4905-87f5-272d2bb81931, infoPort=42921, infoSecurePort=0, ipcPort=46209, storageInfo=lv=-57;cid=testClusterID;nsid=1636820393;c=1731883712565) 2024-11-17T22:48:32,936 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5627209b1da94e8b with lease ID 0x7fab5858edf038ee: from storage DS-e1e33aed-3ae6-482a-8418-a6945e5581ec node DatanodeRegistration(127.0.0.1:46613, datanodeUuid=a31c329b-8c82-4905-87f5-272d2bb81931, infoPort=42921, infoSecurePort=0, ipcPort=46209, storageInfo=lv=-57;cid=testClusterID;nsid=1636820393;c=1731883712565), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-17T22:48:32,936 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5627209b1da94e8b with lease ID 0x7fab5858edf038ee: Processing first storage report for DS-90345e4e-5904-45e1-9ff2-cd29e04f52b4 from datanode DatanodeRegistration(127.0.0.1:46613, datanodeUuid=a31c329b-8c82-4905-87f5-272d2bb81931, infoPort=42921, infoSecurePort=0, ipcPort=46209, storageInfo=lv=-57;cid=testClusterID;nsid=1636820393;c=1731883712565) 2024-11-17T22:48:32,936 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5627209b1da94e8b with lease ID 0x7fab5858edf038ee: from storage DS-90345e4e-5904-45e1-9ff2-cd29e04f52b4 node DatanodeRegistration(127.0.0.1:46613, datanodeUuid=a31c329b-8c82-4905-87f5-272d2bb81931, infoPort=42921, infoSecurePort=0, ipcPort=46209, storageInfo=lv=-57;cid=testClusterID;nsid=1636820393;c=1731883712565), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:48:32,995 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@49d720be{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/java.io.tmpdir/jetty-localhost-39931-hadoop-hdfs-3_4_1-tests_jar-_-any-134558791846578528/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:48:32,995 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@37fa3e5a{HTTP/1.1, (http/1.1)}{localhost:39931} 2024-11-17T22:48:32,995 INFO [Time-limited test {}] server.Server(415): Started @154019ms 2024-11-17T22:48:32,996 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T22:48:33,022 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:33,029 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:33,053 WARN [Thread-1216 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/cluster_4b11f703-f0de-e5d6-1f18-b51e75fa87df/data/data3/current/BP-1180934907-172.17.0.2-1731883712565/current, will proceed with Du for space computation calculation, 2024-11-17T22:48:33,053 WARN [Thread-1217 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/cluster_4b11f703-f0de-e5d6-1f18-b51e75fa87df/data/data4/current/BP-1180934907-172.17.0.2-1731883712565/current, will proceed with Du for space computation calculation, 2024-11-17T22:48:33,068 WARN [Thread-1205 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-17T22:48:33,070 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x26f4e5d201d22044 with lease ID 0x7fab5858edf038ef: Processing first storage report for DS-fb04d6aa-2540-43f4-a8b3-727c3095a7b6 from datanode DatanodeRegistration(127.0.0.1:33101, datanodeUuid=493a7939-dbe6-4ff4-8045-f01018492982, infoPort=35299, infoSecurePort=0, ipcPort=44437, storageInfo=lv=-57;cid=testClusterID;nsid=1636820393;c=1731883712565) 2024-11-17T22:48:33,070 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x26f4e5d201d22044 with lease ID 0x7fab5858edf038ef: from storage DS-fb04d6aa-2540-43f4-a8b3-727c3095a7b6 node DatanodeRegistration(127.0.0.1:33101, datanodeUuid=493a7939-dbe6-4ff4-8045-f01018492982, infoPort=35299, infoSecurePort=0, ipcPort=44437, storageInfo=lv=-57;cid=testClusterID;nsid=1636820393;c=1731883712565), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:48:33,070 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x26f4e5d201d22044 with lease ID 0x7fab5858edf038ef: Processing first storage report for DS-6a39e09b-8b20-4ac5-807d-37e0d1612f35 from datanode DatanodeRegistration(127.0.0.1:33101, datanodeUuid=493a7939-dbe6-4ff4-8045-f01018492982, infoPort=35299, infoSecurePort=0, ipcPort=44437, storageInfo=lv=-57;cid=testClusterID;nsid=1636820393;c=1731883712565) 2024-11-17T22:48:33,071 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x26f4e5d201d22044 with lease ID 0x7fab5858edf038ef: from storage DS-6a39e09b-8b20-4ac5-807d-37e0d1612f35 node DatanodeRegistration(127.0.0.1:33101, datanodeUuid=493a7939-dbe6-4ff4-8045-f01018492982, infoPort=35299, infoSecurePort=0, ipcPort=44437, storageInfo=lv=-57;cid=testClusterID;nsid=1636820393;c=1731883712565), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:48:33,122 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8 2024-11-17T22:48:33,125 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/cluster_4b11f703-f0de-e5d6-1f18-b51e75fa87df/zookeeper_0, clientPort=58964, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/cluster_4b11f703-f0de-e5d6-1f18-b51e75fa87df/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/cluster_4b11f703-f0de-e5d6-1f18-b51e75fa87df/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-17T22:48:33,126 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58964 2024-11-17T22:48:33,126 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:48:33,128 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:48:33,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33101 is added to blk_1073741825_1001 (size=7) 2024-11-17T22:48:33,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46613 is added to blk_1073741825_1001 (size=7) 2024-11-17T22:48:33,141 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8 with version=8 2024-11-17T22:48:33,141 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/hbase-staging 2024-11-17T22:48:33,144 INFO [Time-limited test {}] client.ConnectionUtils(128): master/1a6e40b21a48:0 server-side Connection retries=45 2024-11-17T22:48:33,144 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T22:48:33,144 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T22:48:33,144 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T22:48:33,144 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T22:48:33,144 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T22:48:33,145 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-17T22:48:33,145 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T22:48:33,146 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38529 2024-11-17T22:48:33,148 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38529 connecting to ZooKeeper ensemble=127.0.0.1:58964 2024-11-17T22:48:33,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:385290x0, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T22:48:33,153 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38529-0x1004fdf9ed20000 connected 2024-11-17T22:48:33,164 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:48:33,166 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:48:33,169 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38529-0x1004fdf9ed20000, quorum=127.0.0.1:58964, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T22:48:33,169 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8, hbase.cluster.distributed=false 2024-11-17T22:48:33,171 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38529-0x1004fdf9ed20000, quorum=127.0.0.1:58964, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T22:48:33,172 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38529 2024-11-17T22:48:33,172 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38529 2024-11-17T22:48:33,172 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38529 2024-11-17T22:48:33,172 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38529 2024-11-17T22:48:33,173 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38529 2024-11-17T22:48:33,187 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1a6e40b21a48:0 server-side Connection retries=45 2024-11-17T22:48:33,187 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T22:48:33,187 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with 
queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T22:48:33,187 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T22:48:33,187 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T22:48:33,187 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T22:48:33,187 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-17T22:48:33,187 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T22:48:33,188 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45061 2024-11-17T22:48:33,189 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45061 connecting to ZooKeeper ensemble=127.0.0.1:58964 2024-11-17T22:48:33,190 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:48:33,191 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:48:33,194 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:450610x0, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T22:48:33,195 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:450610x0, quorum=127.0.0.1:58964, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T22:48:33,195 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45061-0x1004fdf9ed20001 connected 2024-11-17T22:48:33,195 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-17T22:48:33,195 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-17T22:48:33,196 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45061-0x1004fdf9ed20001, quorum=127.0.0.1:58964, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-17T22:48:33,197 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45061-0x1004fdf9ed20001, quorum=127.0.0.1:58964, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T22:48:33,197 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45061 2024-11-17T22:48:33,197 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45061 2024-11-17T22:48:33,198 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45061 2024-11-17T22:48:33,198 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45061 2024-11-17T22:48:33,198 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45061 2024-11-17T22:48:33,209 DEBUG [M:0;1a6e40b21a48:38529 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;1a6e40b21a48:38529 2024-11-17T22:48:33,209 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/1a6e40b21a48,38529,1731883713143 2024-11-17T22:48:33,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45061-0x1004fdf9ed20001, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T22:48:33,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38529-0x1004fdf9ed20000, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T22:48:33,211 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38529-0x1004fdf9ed20000, quorum=127.0.0.1:58964, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/1a6e40b21a48,38529,1731883713143 2024-11-17T22:48:33,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45061-0x1004fdf9ed20001, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-17T22:48:33,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38529-0x1004fdf9ed20000, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:48:33,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45061-0x1004fdf9ed20001, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:48:33,212 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38529-0x1004fdf9ed20000, quorum=127.0.0.1:58964, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-17T22:48:33,212 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/1a6e40b21a48,38529,1731883713143 from backup master directory 2024-11-17T22:48:33,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38529-0x1004fdf9ed20000, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/1a6e40b21a48,38529,1731883713143 2024-11-17T22:48:33,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45061-0x1004fdf9ed20001, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T22:48:33,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38529-0x1004fdf9ed20000, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-17T22:48:33,213 WARN [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-17T22:48:33,213 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=1a6e40b21a48,38529,1731883713143 2024-11-17T22:48:33,217 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/hbase.id] with ID: cbcb3af1-f96d-4e1a-b1f8-0316e2101457 2024-11-17T22:48:33,217 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/.tmp/hbase.id 2024-11-17T22:48:33,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46613 is added to blk_1073741826_1002 (size=42) 2024-11-17T22:48:33,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33101 is added to blk_1073741826_1002 (size=42) 2024-11-17T22:48:33,223 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/.tmp/hbase.id]:[hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/hbase.id] 2024-11-17T22:48:33,237 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:48:33,237 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-17T22:48:33,238 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
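For context, the minicluster whose startup is recorded above (StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}) is the kind of cluster a test normally brings up through HBaseTestingUtil. The sketch below is illustrative only: class and option names are taken from the log, while the builder method names are assumed to mirror the option fields printed there; it is not code from TestLogRolling itself.

// Illustrative sketch, not the test's actual code: starting the single-master,
// single-regionserver, two-datanode minicluster described in the records above.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)         // numMasters=1 in the logged option
        .numRegionServers(1)   // numRegionServers=1
        .numDataNodes(2)       // numDataNodes=2
        .numZkServers(1)       // numZkServers=1
        .build();
    util.startMiniCluster(option);  // brings up DFS, ZooKeeper, master and region server
    try {
      // ... test logic against util.getConnection() would go here ...
    } finally {
      util.shutdownMiniCluster();   // tears the cluster down and cleans up test dirs
    }
  }
}

startMiniCluster drives the same sequence the log records here: the DFS namenode and datanodes first, then the MiniZooKeeperCluster, then the master and region server RPC servers.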
2024-11-17T22:48:33,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38529-0x1004fdf9ed20000, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:48:33,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45061-0x1004fdf9ed20001, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:48:33,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33101 is added to blk_1073741827_1003 (size=196) 2024-11-17T22:48:33,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46613 is added to blk_1073741827_1003 (size=196) 2024-11-17T22:48:33,246 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T22:48:33,247 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-17T22:48:33,247 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T22:48:33,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46613 is added to blk_1073741828_1004 (size=1189) 2024-11-17T22:48:33,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33101 is added to blk_1073741828_1004 (size=1189) 2024-11-17T22:48:33,255 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/data/master/store 2024-11-17T22:48:33,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33101 is added to blk_1073741829_1005 (size=34) 2024-11-17T22:48:33,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46613 is added to blk_1073741829_1005 (size=34) 2024-11-17T22:48:33,264 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:48:33,264 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T22:48:33,264 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:48:33,264 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:48:33,265 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T22:48:33,265 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:48:33,265 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
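The 'master:store' descriptor dumped above pins down the four column families (info, proc, rs, state) used by the master's local region. As an illustration only, and not the MasterRegion bootstrap code, the 'info' family settings could be expressed through HBase's public descriptor builders; the family name and values below are copied from the log, while the surrounding usage is an assumed sketch.

// Hedged illustration of mapping the logged 'info' family settings onto the
// public ColumnFamilyDescriptorBuilder / TableDescriptorBuilder API.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                     // VERSIONS => '3'
        .setInMemory(true)                                     // IN_MEMORY => 'true'
        .setBlocksize(8 * 1024)                                // BLOCKSIZE => '8192 B (8KB)'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
        .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
        .build();
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info)  // 'proc', 'rs' and 'state' families omitted for brevity
        .build();
  }
}

Per the descriptor above, the remaining families differ from 'info' only in VERSIONS (1), encoding (NONE), bloom filter type (ROW) and block size (64KB).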
2024-11-17T22:48:33,265 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731883713264Disabling compacts and flushes for region at 1731883713264Disabling writes for close at 1731883713265 (+1 ms)Writing region close event to WAL at 1731883713265Closed at 1731883713265 2024-11-17T22:48:33,266 WARN [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/data/master/store/.initializing 2024-11-17T22:48:33,266 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/WALs/1a6e40b21a48,38529,1731883713143 2024-11-17T22:48:33,269 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a6e40b21a48%2C38529%2C1731883713143, suffix=, logDir=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/WALs/1a6e40b21a48,38529,1731883713143, archiveDir=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/oldWALs, maxLogs=10 2024-11-17T22:48:33,269 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C38529%2C1731883713143.1731883713269 2024-11-17T22:48:33,274 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/WALs/1a6e40b21a48,38529,1731883713143/1a6e40b21a48%2C38529%2C1731883713143.1731883713269 2024-11-17T22:48:33,275 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35299:35299),(127.0.0.1/127.0.0.1:42921:42921)] 2024-11-17T22:48:33,276 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-17T22:48:33,277 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:48:33,277 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:48:33,277 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:48:33,278 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:48:33,279 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-17T22:48:33,279 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:48:33,280 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:48:33,280 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:48:33,281 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-17T22:48:33,281 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:48:33,282 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T22:48:33,282 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:48:33,283 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-17T22:48:33,283 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:48:33,283 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T22:48:33,283 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:48:33,284 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-17T22:48:33,284 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:48:33,284 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T22:48:33,285 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:48:33,285 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:48:33,285 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:48:33,286 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:48:33,286 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:48:33,287 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-17T22:48:33,287 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:48:33,289 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T22:48:33,290 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=757703, jitterRate=-0.03653179109096527}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-17T22:48:33,290 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731883713277Initializing all the Stores at 1731883713278 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883713278Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883713278Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883713278Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883713278Cleaning up temporary data from old regions at 1731883713286 (+8 ms)Region opened successfully at 1731883713290 (+4 ms) 2024-11-17T22:48:33,290 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-17T22:48:33,293 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27b19fdc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a6e40b21a48/172.17.0.2:0 2024-11-17T22:48:33,294 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-17T22:48:33,294 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-17T22:48:33,294 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-17T22:48:33,295 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-17T22:48:33,295 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-17T22:48:33,295 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-17T22:48:33,295 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-17T22:48:33,297 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-17T22:48:33,298 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38529-0x1004fdf9ed20000, quorum=127.0.0.1:58964, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-17T22:48:33,299 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-17T22:48:33,299 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-17T22:48:33,300 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38529-0x1004fdf9ed20000, quorum=127.0.0.1:58964, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-17T22:48:33,300 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-17T22:48:33,301 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-17T22:48:33,301 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38529-0x1004fdf9ed20000, quorum=127.0.0.1:58964, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-17T22:48:33,302 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-17T22:48:33,303 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38529-0x1004fdf9ed20000, quorum=127.0.0.1:58964, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-17T22:48:33,304 DEBUG 
[master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-17T22:48:33,305 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38529-0x1004fdf9ed20000, quorum=127.0.0.1:58964, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-17T22:48:33,306 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-17T22:48:33,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45061-0x1004fdf9ed20001, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T22:48:33,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38529-0x1004fdf9ed20000, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T22:48:33,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45061-0x1004fdf9ed20001, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:48:33,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38529-0x1004fdf9ed20000, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:48:33,307 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=1a6e40b21a48,38529,1731883713143, sessionid=0x1004fdf9ed20000, setting cluster-up flag (Was=false) 2024-11-17T22:48:33,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38529-0x1004fdf9ed20000, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:48:33,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45061-0x1004fdf9ed20001, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:48:33,311 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-17T22:48:33,312 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1a6e40b21a48,38529,1731883713143 2024-11-17T22:48:33,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38529-0x1004fdf9ed20000, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:48:33,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45061-0x1004fdf9ed20001, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:48:33,317 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-17T22:48:33,318 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1a6e40b21a48,38529,1731883713143 2024-11-17T22:48:33,319 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-17T22:48:33,320 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-17T22:48:33,320 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-17T22:48:33,320 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-17T22:48:33,321 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 1a6e40b21a48,38529,1731883713143 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-17T22:48:33,322 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/1a6e40b21a48:0, corePoolSize=5, maxPoolSize=5 2024-11-17T22:48:33,322 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/1a6e40b21a48:0, corePoolSize=5, maxPoolSize=5 2024-11-17T22:48:33,322 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=5, maxPoolSize=5 2024-11-17T22:48:33,322 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=5, maxPoolSize=5 2024-11-17T22:48:33,322 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/1a6e40b21a48:0, corePoolSize=10, maxPoolSize=10 2024-11-17T22:48:33,322 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:48:33,322 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=2, maxPoolSize=2 2024-11-17T22:48:33,322 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=1, 
maxPoolSize=1 2024-11-17T22:48:33,325 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731883743325 2024-11-17T22:48:33,325 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-17T22:48:33,326 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-17T22:48:33,326 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-17T22:48:33,326 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-17T22:48:33,326 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-17T22:48:33,326 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-17T22:48:33,326 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-17T22:48:33,326 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T22:48:33,327 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-17T22:48:33,328 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:48:33,328 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-17T22:48:33,329 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-17T22:48:33,329 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-17T22:48:33,329 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-17T22:48:33,330 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-17T22:48:33,330 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-17T22:48:33,333 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.large.0-1731883713330,5,FailOnTimeoutGroup] 2024-11-17T22:48:33,333 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.small.0-1731883713333,5,FailOnTimeoutGroup] 2024-11-17T22:48:33,333 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-17T22:48:33,333 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-17T22:48:33,333 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-17T22:48:33,333 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-17T22:48:33,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46613 is added to blk_1073741831_1007 (size=1321) 2024-11-17T22:48:33,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33101 is added to blk_1073741831_1007 (size=1321) 2024-11-17T22:48:33,400 INFO [RS:0;1a6e40b21a48:45061 {}] regionserver.HRegionServer(746): ClusterId : cbcb3af1-f96d-4e1a-b1f8-0316e2101457 2024-11-17T22:48:33,400 DEBUG [RS:0;1a6e40b21a48:45061 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-17T22:48:33,402 DEBUG [RS:0;1a6e40b21a48:45061 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-17T22:48:33,402 DEBUG [RS:0;1a6e40b21a48:45061 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-17T22:48:33,404 DEBUG [RS:0;1a6e40b21a48:45061 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-17T22:48:33,405 DEBUG [RS:0;1a6e40b21a48:45061 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49d9069d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a6e40b21a48/172.17.0.2:0 2024-11-17T22:48:33,419 DEBUG [RS:0;1a6e40b21a48:45061 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;1a6e40b21a48:45061 2024-11-17T22:48:33,419 INFO [RS:0;1a6e40b21a48:45061 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-17T22:48:33,419 INFO [RS:0;1a6e40b21a48:45061 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-17T22:48:33,419 DEBUG [RS:0;1a6e40b21a48:45061 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-17T22:48:33,420 INFO [RS:0;1a6e40b21a48:45061 {}] regionserver.HRegionServer(2659): reportForDuty to master=1a6e40b21a48,38529,1731883713143 with port=45061, startcode=1731883713186 2024-11-17T22:48:33,421 DEBUG [RS:0;1a6e40b21a48:45061 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-17T22:48:33,423 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50877, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-17T22:48:33,423 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38529 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1a6e40b21a48,45061,1731883713186 2024-11-17T22:48:33,423 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38529 {}] master.ServerManager(517): Registering regionserver=1a6e40b21a48,45061,1731883713186 2024-11-17T22:48:33,425 DEBUG [RS:0;1a6e40b21a48:45061 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8 2024-11-17T22:48:33,425 DEBUG [RS:0;1a6e40b21a48:45061 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45073 2024-11-17T22:48:33,425 DEBUG [RS:0;1a6e40b21a48:45061 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-17T22:48:33,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38529-0x1004fdf9ed20000, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T22:48:33,427 DEBUG [RS:0;1a6e40b21a48:45061 {}] zookeeper.ZKUtil(111): regionserver:45061-0x1004fdf9ed20001, quorum=127.0.0.1:58964, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1a6e40b21a48,45061,1731883713186 2024-11-17T22:48:33,427 WARN [RS:0;1a6e40b21a48:45061 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-17T22:48:33,427 INFO [RS:0;1a6e40b21a48:45061 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T22:48:33,427 DEBUG [RS:0;1a6e40b21a48:45061 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186 2024-11-17T22:48:33,427 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1a6e40b21a48,45061,1731883713186] 2024-11-17T22:48:33,430 INFO [RS:0;1a6e40b21a48:45061 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-17T22:48:33,432 INFO [RS:0;1a6e40b21a48:45061 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-17T22:48:33,432 INFO [RS:0;1a6e40b21a48:45061 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-17T22:48:33,432 INFO [RS:0;1a6e40b21a48:45061 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-17T22:48:33,433 INFO [RS:0;1a6e40b21a48:45061 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-17T22:48:33,433 INFO [RS:0;1a6e40b21a48:45061 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-17T22:48:33,434 INFO [RS:0;1a6e40b21a48:45061 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-17T22:48:33,434 DEBUG [RS:0;1a6e40b21a48:45061 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:48:33,434 DEBUG [RS:0;1a6e40b21a48:45061 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:48:33,434 DEBUG [RS:0;1a6e40b21a48:45061 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:48:33,434 DEBUG [RS:0;1a6e40b21a48:45061 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:48:33,434 DEBUG [RS:0;1a6e40b21a48:45061 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:48:33,434 DEBUG [RS:0;1a6e40b21a48:45061 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1a6e40b21a48:0, corePoolSize=2, maxPoolSize=2 2024-11-17T22:48:33,434 DEBUG [RS:0;1a6e40b21a48:45061 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:48:33,434 DEBUG [RS:0;1a6e40b21a48:45061 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:48:33,434 DEBUG [RS:0;1a6e40b21a48:45061 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:48:33,435 DEBUG [RS:0;1a6e40b21a48:45061 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:48:33,435 DEBUG [RS:0;1a6e40b21a48:45061 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:48:33,435 DEBUG [RS:0;1a6e40b21a48:45061 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:48:33,435 DEBUG [RS:0;1a6e40b21a48:45061 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1a6e40b21a48:0, corePoolSize=3, maxPoolSize=3 2024-11-17T22:48:33,435 DEBUG [RS:0;1a6e40b21a48:45061 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0, corePoolSize=3, maxPoolSize=3 2024-11-17T22:48:33,441 INFO [RS:0;1a6e40b21a48:45061 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-17T22:48:33,441 INFO [RS:0;1a6e40b21a48:45061 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-17T22:48:33,441 INFO [RS:0;1a6e40b21a48:45061 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T22:48:33,441 INFO [RS:0;1a6e40b21a48:45061 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-17T22:48:33,441 INFO [RS:0;1a6e40b21a48:45061 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-17T22:48:33,441 INFO [RS:0;1a6e40b21a48:45061 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,45061,1731883713186-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T22:48:33,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:33,455 INFO [RS:0;1a6e40b21a48:45061 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-17T22:48:33,455 INFO [RS:0;1a6e40b21a48:45061 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,45061,1731883713186-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
2024-11-17T22:48:33,456 INFO [RS:0;1a6e40b21a48:45061 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:48:33,456 INFO [RS:0;1a6e40b21a48:45061 {}] regionserver.Replication(171): 1a6e40b21a48,45061,1731883713186 started 2024-11-17T22:48:33,468 INFO [RS:0;1a6e40b21a48:45061 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:48:33,468 INFO [RS:0;1a6e40b21a48:45061 {}] regionserver.HRegionServer(1482): Serving as 1a6e40b21a48,45061,1731883713186, RpcServer on 1a6e40b21a48/172.17.0.2:45061, sessionid=0x1004fdf9ed20001 2024-11-17T22:48:33,469 DEBUG [RS:0;1a6e40b21a48:45061 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-17T22:48:33,469 DEBUG [RS:0;1a6e40b21a48:45061 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1a6e40b21a48,45061,1731883713186 2024-11-17T22:48:33,469 DEBUG [RS:0;1a6e40b21a48:45061 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a6e40b21a48,45061,1731883713186' 2024-11-17T22:48:33,469 DEBUG [RS:0;1a6e40b21a48:45061 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-17T22:48:33,469 DEBUG [RS:0;1a6e40b21a48:45061 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-17T22:48:33,470 DEBUG [RS:0;1a6e40b21a48:45061 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-17T22:48:33,470 DEBUG [RS:0;1a6e40b21a48:45061 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-17T22:48:33,470 DEBUG [RS:0;1a6e40b21a48:45061 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1a6e40b21a48,45061,1731883713186 2024-11-17T22:48:33,470 DEBUG [RS:0;1a6e40b21a48:45061 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a6e40b21a48,45061,1731883713186' 2024-11-17T22:48:33,470 DEBUG [RS:0;1a6e40b21a48:45061 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-17T22:48:33,470 DEBUG [RS:0;1a6e40b21a48:45061 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-17T22:48:33,471 DEBUG [RS:0;1a6e40b21a48:45061 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-17T22:48:33,471 INFO [RS:0;1a6e40b21a48:45061 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-17T22:48:33,471 INFO [RS:0;1a6e40b21a48:45061 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-17T22:48:33,574 INFO [RS:0;1a6e40b21a48:45061 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a6e40b21a48%2C45061%2C1731883713186, suffix=, logDir=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186, archiveDir=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/oldWALs, maxLogs=32 2024-11-17T22:48:33,576 INFO [RS:0;1a6e40b21a48:45061 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C45061%2C1731883713186.1731883713575 2024-11-17T22:48:33,584 INFO [RS:0;1a6e40b21a48:45061 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883713575 2024-11-17T22:48:33,586 DEBUG [RS:0;1a6e40b21a48:45061 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35299:35299),(127.0.0.1/127.0.0.1:42921:42921)] 2024-11-17T22:48:33,741 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-17T22:48:33,741 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8 2024-11-17T22:48:33,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46613 is added to blk_1073741833_1009 (size=32) 2024-11-17T22:48:33,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33101 is added to blk_1073741833_1009 (size=32) 2024-11-17T22:48:33,754 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, 
parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:48:33,755 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T22:48:33,757 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T22:48:33,757 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:48:33,758 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:48:33,758 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T22:48:33,759 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T22:48:33,759 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:48:33,760 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:48:33,760 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T22:48:33,761 
INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T22:48:33,761 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:48:33,761 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:48:33,762 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T22:48:33,763 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T22:48:33,763 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:48:33,763 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:48:33,763 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T22:48:33,764 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/data/hbase/meta/1588230740 2024-11-17T22:48:33,764 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/data/hbase/meta/1588230740 2024-11-17T22:48:33,765 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T22:48:33,765 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 
2024-11-17T22:48:33,766 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-17T22:48:33,767 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T22:48:33,769 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T22:48:33,769 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=831518, jitterRate=0.05732989311218262}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T22:48:33,770 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731883713754Initializing all the Stores at 1731883713755 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883713755Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883713755Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883713755Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883713755Cleaning up temporary data from old regions at 1731883713765 (+10 ms)Region opened successfully at 1731883713770 (+5 ms) 2024-11-17T22:48:33,770 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T22:48:33,770 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T22:48:33,770 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T22:48:33,770 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T22:48:33,770 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T22:48:33,770 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T22:48:33,770 
DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731883713770Disabling compacts and flushes for region at 1731883713770Disabling writes for close at 1731883713770Writing region close event to WAL at 1731883713770Closed at 1731883713770 2024-11-17T22:48:33,772 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T22:48:33,772 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-17T22:48:33,772 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-17T22:48:33,773 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T22:48:33,774 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-17T22:48:33,924 DEBUG [1a6e40b21a48:38529 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-17T22:48:33,925 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1a6e40b21a48,45061,1731883713186 2024-11-17T22:48:33,927 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1a6e40b21a48,45061,1731883713186, state=OPENING 2024-11-17T22:48:33,929 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-17T22:48:33,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38529-0x1004fdf9ed20000, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:48:33,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45061-0x1004fdf9ed20001, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:48:33,931 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T22:48:33,931 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T22:48:33,931 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T22:48:33,931 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=1a6e40b21a48,45061,1731883713186}] 2024-11-17T22:48:34,023 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed 
invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:34,030 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:34,087 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-17T22:48:34,093 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39289, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-17T22:48:34,100 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-17T22:48:34,100 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T22:48:34,102 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a6e40b21a48%2C45061%2C1731883713186.meta, suffix=.meta, logDir=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186, archiveDir=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/oldWALs, maxLogs=32 2024-11-17T22:48:34,103 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C45061%2C1731883713186.meta.1731883714103.meta 2024-11-17T22:48:34,109 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.meta.1731883714103.meta 2024-11-17T22:48:34,110 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35299:35299),(127.0.0.1/127.0.0.1:42921:42921)] 2024-11-17T22:48:34,112 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-17T22:48:34,112 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 
2024-11-17T22:48:34,112 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-17T22:48:34,113 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-17T22:48:34,113 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-17T22:48:34,113 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:48:34,113 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-17T22:48:34,113 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-17T22:48:34,115 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T22:48:34,116 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T22:48:34,116 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:48:34,117 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:48:34,117 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T22:48:34,118 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, 
major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T22:48:34,118 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:48:34,119 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:48:34,119 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T22:48:34,120 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T22:48:34,120 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:48:34,121 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:48:34,121 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T22:48:34,121 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 
2024-11-17T22:48:34,121 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:48:34,122 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:48:34,122 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T22:48:34,123 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/data/hbase/meta/1588230740 2024-11-17T22:48:34,124 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/data/hbase/meta/1588230740 2024-11-17T22:48:34,125 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T22:48:34,125 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T22:48:34,125 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-17T22:48:34,126 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T22:48:34,127 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=754300, jitterRate=-0.04085828363895416}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T22:48:34,127 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-17T22:48:34,128 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731883714113Writing region info on filesystem at 1731883714113Initializing all the Stores at 1731883714114 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883714115 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883714115Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883714115Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883714115Cleaning up temporary data from old regions at 1731883714125 (+10 ms)Running coprocessor post-open hooks at 1731883714127 (+2 ms)Region opened successfully at 1731883714128 (+1 ms) 2024-11-17T22:48:34,129 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731883714087 2024-11-17T22:48:34,131 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-17T22:48:34,131 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-17T22:48:34,132 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=1a6e40b21a48,45061,1731883713186 2024-11-17T22:48:34,133 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1a6e40b21a48,45061,1731883713186, state=OPEN 2024-11-17T22:48:34,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38529-0x1004fdf9ed20000, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T22:48:34,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45061-0x1004fdf9ed20001, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T22:48:34,135 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=1a6e40b21a48,45061,1731883713186 2024-11-17T22:48:34,135 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T22:48:34,135 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T22:48:34,138 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-17T22:48:34,138 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=1a6e40b21a48,45061,1731883713186 in 204 msec 2024-11-17T22:48:34,141 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-17T22:48:34,141 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 366 msec 2024-11-17T22:48:34,142 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T22:48:34,142 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-17T22:48:34,143 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T22:48:34,143 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a6e40b21a48,45061,1731883713186, seqNum=-1] 2024-11-17T22:48:34,144 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T22:48:34,145 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51843, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T22:48:34,151 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 830 msec 2024-11-17T22:48:34,151 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731883714151, completionTime=-1 2024-11-17T22:48:34,151 INFO 
[master/1a6e40b21a48:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-17T22:48:34,151 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-17T22:48:34,153 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-17T22:48:34,153 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731883774153 2024-11-17T22:48:34,153 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731883834153 2024-11-17T22:48:34,153 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-17T22:48:34,154 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,38529,1731883713143-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T22:48:34,154 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,38529,1731883713143-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:48:34,154 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,38529,1731883713143-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:48:34,154 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-1a6e40b21a48:38529, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:48:34,154 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-17T22:48:34,154 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-17T22:48:34,156 DEBUG [master/1a6e40b21a48:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-17T22:48:34,158 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.945sec 2024-11-17T22:48:34,158 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-17T22:48:34,158 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-17T22:48:34,158 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-17T22:48:34,158 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-17T22:48:34,159 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-17T22:48:34,159 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,38529,1731883713143-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T22:48:34,159 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,38529,1731883713143-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-17T22:48:34,161 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-17T22:48:34,161 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-17T22:48:34,162 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,38529,1731883713143-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:48:34,201 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6144f613, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T22:48:34,201 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 1a6e40b21a48,38529,-1 for getting cluster id 2024-11-17T22:48:34,201 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-17T22:48:34,203 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'cbcb3af1-f96d-4e1a-b1f8-0316e2101457' 2024-11-17T22:48:34,204 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-17T22:48:34,204 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "cbcb3af1-f96d-4e1a-b1f8-0316e2101457" 2024-11-17T22:48:34,205 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51e74c02, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T22:48:34,205 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a6e40b21a48,38529,-1] 2024-11-17T22:48:34,205 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-17T22:48:34,206 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:48:34,208 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55192, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-17T22:48:34,210 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62cd82cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T22:48:34,210 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T22:48:34,211 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a6e40b21a48,45061,1731883713186, seqNum=-1] 2024-11-17T22:48:34,212 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T22:48:34,214 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38294, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T22:48:34,216 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=1a6e40b21a48,38529,1731883713143 2024-11-17T22:48:34,216 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:48:34,221 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-17T22:48:34,221 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-17T22:48:34,221 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-17T22:48:34,222 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-17T22:48:34,223 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 1a6e40b21a48,38529,1731883713143 2024-11-17T22:48:34,223 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1f5796d1 2024-11-17T22:48:34,223 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-17T22:48:34,225 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55194, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-17T22:48:34,226 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38529 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-17T22:48:34,226 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38529 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-17T22:48:34,226 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38529 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T22:48:34,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38529 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-17T22:48:34,230 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-17T22:48:34,230 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:48:34,230 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38529 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-17T22:48:34,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38529 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-17T22:48:34,231 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-17T22:48:34,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46613 is added to blk_1073741835_1011 (size=395) 2024-11-17T22:48:34,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33101 is added to blk_1073741835_1011 (size=395) 2024-11-17T22:48:34,242 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 3e9e7bec8a35c756988960d0927bccb8, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731883714226.3e9e7bec8a35c756988960d0927bccb8.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8 2024-11-17T22:48:34,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46613 is added to blk_1073741836_1012 (size=78) 2024-11-17T22:48:34,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33101 is added to blk_1073741836_1012 (size=78) 2024-11-17T22:48:34,250 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731883714226.3e9e7bec8a35c756988960d0927bccb8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:48:34,250 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 3e9e7bec8a35c756988960d0927bccb8, disabling compactions & flushes 2024-11-17T22:48:34,250 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731883714226.3e9e7bec8a35c756988960d0927bccb8. 2024-11-17T22:48:34,250 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731883714226.3e9e7bec8a35c756988960d0927bccb8. 2024-11-17T22:48:34,251 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731883714226.3e9e7bec8a35c756988960d0927bccb8. after waiting 0 ms 2024-11-17T22:48:34,251 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731883714226.3e9e7bec8a35c756988960d0927bccb8. 2024-11-17T22:48:34,251 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731883714226.3e9e7bec8a35c756988960d0927bccb8. 2024-11-17T22:48:34,251 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 3e9e7bec8a35c756988960d0927bccb8: Waiting for close lock at 1731883714250Disabling compacts and flushes for region at 1731883714250Disabling writes for close at 1731883714251 (+1 ms)Writing region close event to WAL at 1731883714251Closed at 1731883714251 2024-11-17T22:48:34,253 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-17T22:48:34,253 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731883714226.3e9e7bec8a35c756988960d0927bccb8.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731883714253"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731883714253"}]},"ts":"1731883714253"} 2024-11-17T22:48:34,256 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-17T22:48:34,257 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-17T22:48:34,258 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731883714257"}]},"ts":"1731883714257"} 2024-11-17T22:48:34,260 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-17T22:48:34,260 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=3e9e7bec8a35c756988960d0927bccb8, ASSIGN}] 2024-11-17T22:48:34,261 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=3e9e7bec8a35c756988960d0927bccb8, ASSIGN 2024-11-17T22:48:34,262 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=3e9e7bec8a35c756988960d0927bccb8, ASSIGN; state=OFFLINE, location=1a6e40b21a48,45061,1731883713186; forceNewPlan=false, retain=false 2024-11-17T22:48:34,413 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3e9e7bec8a35c756988960d0927bccb8, regionState=OPENING, regionLocation=1a6e40b21a48,45061,1731883713186 2024-11-17T22:48:34,417 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=3e9e7bec8a35c756988960d0927bccb8, ASSIGN because future has completed 2024-11-17T22:48:34,418 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3e9e7bec8a35c756988960d0927bccb8, server=1a6e40b21a48,45061,1731883713186}] 2024-11-17T22:48:34,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:34,583 INFO [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731883714226.3e9e7bec8a35c756988960d0927bccb8. 
2024-11-17T22:48:34,584 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 3e9e7bec8a35c756988960d0927bccb8, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731883714226.3e9e7bec8a35c756988960d0927bccb8.', STARTKEY => '', ENDKEY => ''} 2024-11-17T22:48:34,585 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 3e9e7bec8a35c756988960d0927bccb8 2024-11-17T22:48:34,585 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731883714226.3e9e7bec8a35c756988960d0927bccb8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:48:34,585 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 3e9e7bec8a35c756988960d0927bccb8 2024-11-17T22:48:34,585 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 3e9e7bec8a35c756988960d0927bccb8 2024-11-17T22:48:34,587 INFO [StoreOpener-3e9e7bec8a35c756988960d0927bccb8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 3e9e7bec8a35c756988960d0927bccb8 2024-11-17T22:48:34,590 INFO [StoreOpener-3e9e7bec8a35c756988960d0927bccb8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3e9e7bec8a35c756988960d0927bccb8 columnFamilyName info 2024-11-17T22:48:34,590 DEBUG [StoreOpener-3e9e7bec8a35c756988960d0927bccb8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:48:34,591 INFO [StoreOpener-3e9e7bec8a35c756988960d0927bccb8-1 {}] regionserver.HStore(327): Store=3e9e7bec8a35c756988960d0927bccb8/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T22:48:34,591 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 3e9e7bec8a35c756988960d0927bccb8 2024-11-17T22:48:34,592 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/data/default/TestLogRolling-testLogRollOnPipelineRestart/3e9e7bec8a35c756988960d0927bccb8 2024-11-17T22:48:34,592 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/data/default/TestLogRolling-testLogRollOnPipelineRestart/3e9e7bec8a35c756988960d0927bccb8 2024-11-17T22:48:34,593 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 3e9e7bec8a35c756988960d0927bccb8 2024-11-17T22:48:34,593 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 3e9e7bec8a35c756988960d0927bccb8 2024-11-17T22:48:34,595 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 3e9e7bec8a35c756988960d0927bccb8 2024-11-17T22:48:34,598 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/data/default/TestLogRolling-testLogRollOnPipelineRestart/3e9e7bec8a35c756988960d0927bccb8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T22:48:34,598 INFO [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 3e9e7bec8a35c756988960d0927bccb8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=861263, jitterRate=0.09515324234962463}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-17T22:48:34,599 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3e9e7bec8a35c756988960d0927bccb8 2024-11-17T22:48:34,599 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 3e9e7bec8a35c756988960d0927bccb8: Running coprocessor pre-open hook at 1731883714585Writing region info on filesystem at 1731883714585Initializing all the Stores at 1731883714587 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883714587Cleaning up temporary data from old regions at 1731883714593 (+6 ms)Running coprocessor post-open hooks at 1731883714599 (+6 ms)Region opened successfully at 1731883714599 2024-11-17T22:48:34,600 INFO [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731883714226.3e9e7bec8a35c756988960d0927bccb8., pid=6, masterSystemTime=1731883714574 2024-11-17T22:48:34,603 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1731883714226.3e9e7bec8a35c756988960d0927bccb8. 2024-11-17T22:48:34,603 INFO [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731883714226.3e9e7bec8a35c756988960d0927bccb8. 2024-11-17T22:48:34,604 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3e9e7bec8a35c756988960d0927bccb8, regionState=OPEN, openSeqNum=2, regionLocation=1a6e40b21a48,45061,1731883713186 2024-11-17T22:48:34,606 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3e9e7bec8a35c756988960d0927bccb8, server=1a6e40b21a48,45061,1731883713186 because future has completed 2024-11-17T22:48:34,611 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-17T22:48:34,611 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 3e9e7bec8a35c756988960d0927bccb8, server=1a6e40b21a48,45061,1731883713186 in 190 msec 2024-11-17T22:48:34,614 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-17T22:48:34,614 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=3e9e7bec8a35c756988960d0927bccb8, ASSIGN in 351 msec 2024-11-17T22:48:34,615 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-17T22:48:34,615 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731883714615"}]},"ts":"1731883714615"} 2024-11-17T22:48:34,617 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-17T22:48:34,618 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-17T22:48:34,620 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 391 msec 2024-11-17T22:48:34,902 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-17T22:48:34,902 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-17T22:48:34,904 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-17T22:48:34,904 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-17T22:48:34,905 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T22:48:34,905 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-17T22:48:35,025 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:35,032 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:35,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:36,027 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:36,033 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:36,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:37,028 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:37,034 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:37,459 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:38,030 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:38,035 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:38,460 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:39,031 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:39,036 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:39,132 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:39,133 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:39,133 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:39,133 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:39,133 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:39,134 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:39,137 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:39,137 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:39,137 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:39,139 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:39,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:39,644 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-17T22:48:39,667 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:39,668 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:39,668 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:39,668 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:39,668 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:39,669 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:39,672 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:39,673 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:39,673 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:39,675 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:48:39,680 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-17T22:48:39,681 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-17T22:48:40,033 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:40,038 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:40,463 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:41,034 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:41,039 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:41,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:42,035 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:42,040 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:42,466 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:43,037 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:43,041 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:43,467 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:44,038 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:44,042 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:48:44,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38529 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-17T22:48:44,312 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-17T22:48:44,313 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-17T22:48:44,319 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-17T22:48:44,319 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731883714226.3e9e7bec8a35c756988960d0927bccb8. 2024-11-17T22:48:44,323 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731883714226.3e9e7bec8a35c756988960d0927bccb8., hostname=1a6e40b21a48,45061,1731883713186, seqNum=2] 2024-11-17T22:48:44,468 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:45,039 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:45,043 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:45,469 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:46,040 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:46,044 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:46,326 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883713575 2024-11-17T22:48:46,328 WARN [ResponseProcessor for block BP-1180934907-172.17.0.2-1731883712565:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1180934907-172.17.0.2-1731883712565:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:46,328 WARN [ResponseProcessor for block BP-1180934907-172.17.0.2-1731883712565:blk_1073741832_1008 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1180934907-172.17.0.2-1731883712565:blk_1073741832_1008 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
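The repeated "Failed invocation" warnings above come from RecoverLeaseFSUtils probing DistributedFileSystem.isFileClosed through reflection after the mini-cluster's filesystem has already been shut down, so each probe surfaces the underlying "java.io.IOException: Filesystem closed" wrapped in an InvocationTargetException and the caller simply retries on its next interval. The snippet below is only a minimal sketch of that reflective probing pattern, assuming nothing beyond the public Hadoop FileSystem/Path API; it is not the HBase implementation.

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    /**
     * Illustrative sketch: ask a FileSystem whether a file is closed via
     * reflection, the way the WARN lines above do. Returns false whenever the
     * probe cannot be completed, e.g. because the DFSClient is already closed.
     */
    final class IsFileClosedProbe {
      private IsFileClosedProbe() {}

      static boolean isFileClosed(FileSystem fs, Path path) {
        try {
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, path);
        } catch (NoSuchMethodException e) {
          // This FileSystem implementation does not expose isFileClosed.
          return false;
        } catch (IllegalAccessException | InvocationTargetException e) {
          // Reflection wraps checked failures such as "Filesystem closed" in
          // an InvocationTargetException, which is what the log reports
          // before the lease-recovery loop tries again.
          return false;
        }
      }
    }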
2024-11-17T22:48:46,328 WARN [ResponseProcessor for block BP-1180934907-172.17.0.2-1731883712565:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1180934907-172.17.0.2-1731883712565:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:46,329 WARN [DataStreamer for file /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/WALs/1a6e40b21a48,38529,1731883713143/1a6e40b21a48%2C38529%2C1731883713143.1731883713269 block BP-1180934907-172.17.0.2-1731883712565:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1180934907-172.17.0.2-1731883712565:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33101,DS-fb04d6aa-2540-43f4-a8b3-727c3095a7b6,DISK], DatanodeInfoWithStorage[127.0.0.1:46613,DS-e1e33aed-3ae6-482a-8418-a6945e5581ec,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33101,DS-fb04d6aa-2540-43f4-a8b3-727c3095a7b6,DISK]) is bad. 2024-11-17T22:48:46,329 WARN [DataStreamer for file /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.meta.1731883714103.meta block BP-1180934907-172.17.0.2-1731883712565:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1180934907-172.17.0.2-1731883712565:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33101,DS-fb04d6aa-2540-43f4-a8b3-727c3095a7b6,DISK], DatanodeInfoWithStorage[127.0.0.1:46613,DS-e1e33aed-3ae6-482a-8418-a6945e5581ec,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33101,DS-fb04d6aa-2540-43f4-a8b3-727c3095a7b6,DISK]) is bad. 2024-11-17T22:48:46,330 WARN [DataStreamer for file /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883713575 block BP-1180934907-172.17.0.2-1731883712565:blk_1073741832_1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1180934907-172.17.0.2-1731883712565:blk_1073741832_1008 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33101,DS-fb04d6aa-2540-43f4-a8b3-727c3095a7b6,DISK], DatanodeInfoWithStorage[127.0.0.1:46613,DS-e1e33aed-3ae6-482a-8418-a6945e5581ec,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33101,DS-fb04d6aa-2540-43f4-a8b3-727c3095a7b6,DISK]) is bad. 2024-11-17T22:48:46,331 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1833559200_22 at /127.0.0.1:49960 [Receiving block BP-1180934907-172.17.0.2-1731883712565:blk_1073741832_1008] {}] datanode.DataXceiver(331): 127.0.0.1:33101:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49960 dst: /127.0.0.1:33101 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:46,331 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1182059241_22 at /127.0.0.1:49946 [Receiving block BP-1180934907-172.17.0.2-1731883712565:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33101:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49946 dst: /127.0.0.1:33101 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:46,331 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1833559200_22 at /127.0.0.1:49972 [Receiving block BP-1180934907-172.17.0.2-1731883712565:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33101:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49972 dst: /127.0.0.1:33101 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T22:48:46,332 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1833559200_22 at /127.0.0.1:53312 [Receiving block BP-1180934907-172.17.0.2-1731883712565:blk_1073741832_1008] {}] datanode.DataXceiver(331): 127.0.0.1:46613:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53312 dst: /127.0.0.1:46613 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:46,332 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1833559200_22 at /127.0.0.1:53336 [Receiving block BP-1180934907-172.17.0.2-1731883712565:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46613:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53336 dst: /127.0.0.1:46613 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T22:48:46,332 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1182059241_22 at /127.0.0.1:53284 [Receiving block BP-1180934907-172.17.0.2-1731883712565:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46613:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53284 dst: /127.0.0.1:46613 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:46,334 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@49d720be{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:48:46,335 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@37fa3e5a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T22:48:46,335 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T22:48:46,335 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@23fcbb95{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T22:48:46,335 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c7a3196{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/hadoop.log.dir/,STOPPED} 2024-11-17T22:48:46,336 WARN [BP-1180934907-172.17.0.2-1731883712565 heartbeating to localhost/127.0.0.1:45073 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T22:48:46,336 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T22:48:46,336 WARN [BP-1180934907-172.17.0.2-1731883712565 heartbeating to localhost/127.0.0.1:45073 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1180934907-172.17.0.2-1731883712565 (Datanode Uuid 493a7939-dbe6-4ff4-8045-f01018492982) service to localhost/127.0.0.1:45073 2024-11-17T22:48:46,336 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T22:48:46,337 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/cluster_4b11f703-f0de-e5d6-1f18-b51e75fa87df/data/data3/current/BP-1180934907-172.17.0.2-1731883712565 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:48:46,337 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/cluster_4b11f703-f0de-e5d6-1f18-b51e75fa87df/data/data4/current/BP-1180934907-172.17.0.2-1731883712565 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:48:46,337 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T22:48:46,345 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:48:46,349 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T22:48:46,350 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T22:48:46,350 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T22:48:46,350 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T22:48:46,350 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6565628b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/hadoop.log.dir/,AVAILABLE} 2024-11-17T22:48:46,351 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70644e5e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T22:48:46,443 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@235b3635{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/java.io.tmpdir/jetty-localhost-35689-hadoop-hdfs-3_4_1-tests_jar-_-any-6975174780276202987/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:48:46,444 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2786a051{HTTP/1.1, 
(http/1.1)}{localhost:35689} 2024-11-17T22:48:46,444 INFO [Time-limited test {}] server.Server(415): Started @167468ms 2024-11-17T22:48:46,445 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T22:48:46,466 WARN [ResponseProcessor for block BP-1180934907-172.17.0.2-1731883712565:blk_1073741834_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1180934907-172.17.0.2-1731883712565:blk_1073741834_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:46,466 WARN [ResponseProcessor for block BP-1180934907-172.17.0.2-1731883712565:blk_1073741832_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1180934907-172.17.0.2-1731883712565:blk_1073741832_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:46,466 WARN [ResponseProcessor for block BP-1180934907-172.17.0.2-1731883712565:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1180934907-172.17.0.2-1731883712565:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:46,466 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1833559200_22 at /127.0.0.1:48298 [Receiving block BP-1180934907-172.17.0.2-1731883712565:blk_1073741832_1008] {}] datanode.DataXceiver(331): 127.0.0.1:46613:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48298 dst: /127.0.0.1:46613 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:46,467 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1182059241_22 at /127.0.0.1:48312 [Receiving block BP-1180934907-172.17.0.2-1731883712565:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46613:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48312 dst: /127.0.0.1:46613 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:46,467 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1833559200_22 at /127.0.0.1:48300 [Receiving block BP-1180934907-172.17.0.2-1731883712565:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46613:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48300 dst: /127.0.0.1:46613 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T22:48:46,470 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@354edf1e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:48:46,470 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:48:46,470 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2aa0e69d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T22:48:46,470 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T22:48:46,471 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@444db7a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T22:48:46,471 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@29ec962a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/hadoop.log.dir/,STOPPED} 2024-11-17T22:48:46,472 WARN [BP-1180934907-172.17.0.2-1731883712565 heartbeating to localhost/127.0.0.1:45073 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T22:48:46,472 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-17T22:48:46,472 WARN [BP-1180934907-172.17.0.2-1731883712565 heartbeating to localhost/127.0.0.1:45073 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1180934907-172.17.0.2-1731883712565 (Datanode Uuid a31c329b-8c82-4905-87f5-272d2bb81931) service to localhost/127.0.0.1:45073 2024-11-17T22:48:46,472 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T22:48:46,472 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/cluster_4b11f703-f0de-e5d6-1f18-b51e75fa87df/data/data1/current/BP-1180934907-172.17.0.2-1731883712565 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:48:46,472 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/cluster_4b11f703-f0de-e5d6-1f18-b51e75fa87df/data/data2/current/BP-1180934907-172.17.0.2-1731883712565 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:48:46,472 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T22:48:46,482 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:48:46,486 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T22:48:46,487 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T22:48:46,487 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T22:48:46,487 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T22:48:46,487 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3dc5f7b3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/hadoop.log.dir/,AVAILABLE} 2024-11-17T22:48:46,487 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1d692efe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T22:48:46,519 WARN [Thread-1340 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-17T22:48:46,521 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa446498c0f6962ab with lease ID 0x7fab5858edf038f0: from storage DS-fb04d6aa-2540-43f4-a8b3-727c3095a7b6 node DatanodeRegistration(127.0.0.1:36411, datanodeUuid=493a7939-dbe6-4ff4-8045-f01018492982, infoPort=45143, infoSecurePort=0, ipcPort=39711, storageInfo=lv=-57;cid=testClusterID;nsid=1636820393;c=1731883712565), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:48:46,521 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa446498c0f6962ab with lease ID 0x7fab5858edf038f0: from storage DS-6a39e09b-8b20-4ac5-807d-37e0d1612f35 node DatanodeRegistration(127.0.0.1:36411, datanodeUuid=493a7939-dbe6-4ff4-8045-f01018492982, infoPort=45143, infoSecurePort=0, ipcPort=39711, storageInfo=lv=-57;cid=testClusterID;nsid=1636820393;c=1731883712565), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:48:46,585 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@673d3bba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/java.io.tmpdir/jetty-localhost-34363-hadoop-hdfs-3_4_1-tests_jar-_-any-2950524250541684742/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:48:46,586 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@14c76857{HTTP/1.1, (http/1.1)}{localhost:34363} 2024-11-17T22:48:46,586 INFO [Time-limited test {}] server.Server(415): Started @167610ms 2024-11-17T22:48:46,587 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
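The Jetty context shutdowns, the "Ending block pool service" warnings, and the fresh block reports above record the test stopping its datanodes and bringing them back while WAL blocks are still open, which is what forces the pipeline recovery seen earlier. A rough sketch of that kind of datanode bounce is shown below; it assumes Hadoop's MiniDFSCluster test harness (stopDataNode/restartDataNode/waitActive) and hypothetical names, and is not the TestLogRolling source.

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.hdfs.MiniDFSCluster;

    /**
     * Illustrative sketch: stop every datanode of a MiniDFSCluster and start
     * them again, pushing any open write pipelines through error recovery.
     */
    final class PipelineRestartSketch {
      private PipelineRestartSketch() {}

      static void bounceDataNodes(MiniDFSCluster cluster) throws IOException {
        int count = cluster.getDataNodes().size();
        List<MiniDFSCluster.DataNodeProperties> stopped = new ArrayList<>(count);
        // Always stop index 0: the live datanode list shrinks as nodes stop.
        for (int i = 0; i < count; i++) {
          stopped.add(cluster.stopDataNode(0));
        }
        for (MiniDFSCluster.DataNodeProperties dn : stopped) {
          cluster.restartDataNode(dn);
        }
        // Block until the namenode sees the datanodes again, i.e. when block
        // reports like the ones logged above start flowing.
        cluster.waitActive();
      }
    }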
2024-11-17T22:48:46,680 WARN [Thread-1371 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-17T22:48:46,682 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x47bdfe841ce2a1e4 with lease ID 0x7fab5858edf038f1: from storage DS-e1e33aed-3ae6-482a-8418-a6945e5581ec node DatanodeRegistration(127.0.0.1:40771, datanodeUuid=a31c329b-8c82-4905-87f5-272d2bb81931, infoPort=43335, infoSecurePort=0, ipcPort=43617, storageInfo=lv=-57;cid=testClusterID;nsid=1636820393;c=1731883712565), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:48:46,682 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x47bdfe841ce2a1e4 with lease ID 0x7fab5858edf038f1: from storage DS-90345e4e-5904-45e1-9ff2-cd29e04f52b4 node DatanodeRegistration(127.0.0.1:40771, datanodeUuid=a31c329b-8c82-4905-87f5-272d2bb81931, infoPort=43335, infoSecurePort=0, ipcPort=43617, storageInfo=lv=-57;cid=testClusterID;nsid=1636820393;c=1731883712565), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:48:47,041 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:48:47,046 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:47,471 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:47,605 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-17T22:48:47,609 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-17T22:48:47,611 ERROR [FSHLog-0-hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8-prefix:1a6e40b21a48,45061,1731883713186 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46613,DS-e1e33aed-3ae6-482a-8418-a6945e5581ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:47,611 WARN [FSHLog-0-hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8-prefix:1a6e40b21a48,45061,1731883713186 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46613,DS-e1e33aed-3ae6-482a-8418-a6945e5581ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T22:48:47,611 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1a6e40b21a48%2C45061%2C1731883713186:(num 1731883713575) roll requested 2024-11-17T22:48:47,612 INFO [regionserver/1a6e40b21a48:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C45061%2C1731883713186.1731883727611 2024-11-17T22:48:47,618 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883713575 newFile=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883727611 2024-11-17T22:48:47,619 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:47,619 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:47,619 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:47,619 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:47,619 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:47,619 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883713575 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883727611 2024-11-17T22:48:47,620 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46613,DS-e1e33aed-3ae6-482a-8418-a6945e5581ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:47,620 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46613,DS-e1e33aed-3ae6-482a-8418-a6945e5581ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T22:48:47,620 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883713575 2024-11-17T22:48:47,620 WARN [IPC Server handler 3 on default port 45073 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883713575 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741832_1014 2024-11-17T22:48:47,621 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883713575 after 1ms 2024-11-17T22:48:47,623 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45143:45143),(127.0.0.1/127.0.0.1:43335:43335)] 2024-11-17T22:48:47,624 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883713575 is not closed yet, will try archiving it next time 2024-11-17T22:48:48,043 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:48,047 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:48,472 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:48,522 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741832_1014: GenerationStamp not matched, existing replica is blk_1073741832_1008 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-17T22:48:49,044 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:49,048 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:49,473 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:48:49,630 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-17T22:48:50,045 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:50,049 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:50,474 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:51,047 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:51,051 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:51,475 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:51,621 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883713575 after 4001ms 2024-11-17T22:48:51,637 WARN [ResponseProcessor for block BP-1180934907-172.17.0.2-1731883712565:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1180934907-172.17.0.2-1731883712565:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-1180934907-172.17.0.2-1731883712565:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:40771,DS-e1e33aed-3ae6-482a-8418-a6945e5581ec,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:51,637 WARN [DataStreamer for file /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883727611 block BP-1180934907-172.17.0.2-1731883712565:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1180934907-172.17.0.2-1731883712565:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36411,DS-fb04d6aa-2540-43f4-a8b3-727c3095a7b6,DISK], DatanodeInfoWithStorage[127.0.0.1:40771,DS-e1e33aed-3ae6-482a-8418-a6945e5581ec,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40771,DS-e1e33aed-3ae6-482a-8418-a6945e5581ec,DISK]) is bad. 2024-11-17T22:48:51,637 WARN [PacketResponder: BP-1180934907-172.17.0.2-1731883712565:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40771] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:51,638 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1833559200_22 at /127.0.0.1:48152 [Receiving block BP-1180934907-172.17.0.2-1731883712565:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:36411:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48152 dst: /127.0.0.1:36411 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:51,638 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1833559200_22 at /127.0.0.1:43302 [Receiving block BP-1180934907-172.17.0.2-1731883712565:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:40771:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43302 dst: /127.0.0.1:40771 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:51,640 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@673d3bba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:48:51,640 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@14c76857{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T22:48:51,640 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T22:48:51,640 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1d692efe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T22:48:51,640 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3dc5f7b3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/hadoop.log.dir/,STOPPED} 2024-11-17T22:48:51,641 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T22:48:51,641 WARN [BP-1180934907-172.17.0.2-1731883712565 heartbeating to localhost/127.0.0.1:45073 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T22:48:51,641 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T22:48:51,641 WARN [BP-1180934907-172.17.0.2-1731883712565 heartbeating to localhost/127.0.0.1:45073 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1180934907-172.17.0.2-1731883712565 (Datanode Uuid a31c329b-8c82-4905-87f5-272d2bb81931) service to localhost/127.0.0.1:45073 2024-11-17T22:48:51,642 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/cluster_4b11f703-f0de-e5d6-1f18-b51e75fa87df/data/data1/current/BP-1180934907-172.17.0.2-1731883712565 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:48:51,642 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/cluster_4b11f703-f0de-e5d6-1f18-b51e75fa87df/data/data2/current/BP-1180934907-172.17.0.2-1731883712565 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:48:51,643 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T22:48:51,651 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:48:51,655 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T22:48:51,655 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T22:48:51,655 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T22:48:51,655 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T22:48:51,656 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75f58649{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/hadoop.log.dir/,AVAILABLE} 2024-11-17T22:48:51,656 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@571de0fd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T22:48:51,749 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@45604664{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/java.io.tmpdir/jetty-localhost-40333-hadoop-hdfs-3_4_1-tests_jar-_-any-5773550745962896845/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:48:51,750 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6e793ffb{HTTP/1.1, (http/1.1)}{localhost:40333} 2024-11-17T22:48:51,750 INFO [Time-limited test {}] server.Server(415): Started @172774ms 2024-11-17T22:48:51,751 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T22:48:51,767 WARN [ResponseProcessor for block BP-1180934907-172.17.0.2-1731883712565:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1180934907-172.17.0.2-1731883712565:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:51,767 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1833559200_22 at /127.0.0.1:54222 [Receiving block BP-1180934907-172.17.0.2-1731883712565:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:36411:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54222 dst: /127.0.0.1:36411 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:51,770 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@235b3635{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:48:51,770 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2786a051{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T22:48:51,770 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T22:48:51,770 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70644e5e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T22:48:51,771 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6565628b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/hadoop.log.dir/,STOPPED} 2024-11-17T22:48:51,772 WARN [BP-1180934907-172.17.0.2-1731883712565 heartbeating to localhost/127.0.0.1:45073 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T22:48:51,772 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T22:48:51,772 WARN [BP-1180934907-172.17.0.2-1731883712565 heartbeating to localhost/127.0.0.1:45073 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1180934907-172.17.0.2-1731883712565 (Datanode Uuid 493a7939-dbe6-4ff4-8045-f01018492982) service to localhost/127.0.0.1:45073 2024-11-17T22:48:51,772 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T22:48:51,772 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/cluster_4b11f703-f0de-e5d6-1f18-b51e75fa87df/data/data3/current/BP-1180934907-172.17.0.2-1731883712565 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:48:51,773 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/cluster_4b11f703-f0de-e5d6-1f18-b51e75fa87df/data/data4/current/BP-1180934907-172.17.0.2-1731883712565 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:48:51,773 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T22:48:51,787 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:48:51,792 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T22:48:51,797 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T22:48:51,797 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T22:48:51,798 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T22:48:51,798 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ede944f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/hadoop.log.dir/,AVAILABLE} 2024-11-17T22:48:51,798 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@17312068{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T22:48:51,824 WARN [Thread-1414 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T22:48:51,827 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa047e812928bf669 with lease ID 0x7fab5858edf038f2: from storage DS-e1e33aed-3ae6-482a-8418-a6945e5581ec node DatanodeRegistration(127.0.0.1:44953, datanodeUuid=a31c329b-8c82-4905-87f5-272d2bb81931, infoPort=45755, infoSecurePort=0, ipcPort=41809, storageInfo=lv=-57;cid=testClusterID;nsid=1636820393;c=1731883712565), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:48:51,827 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa047e812928bf669 with lease ID 0x7fab5858edf038f2: from storage DS-90345e4e-5904-45e1-9ff2-cd29e04f52b4 node DatanodeRegistration(127.0.0.1:44953, datanodeUuid=a31c329b-8c82-4905-87f5-272d2bb81931, infoPort=45755, infoSecurePort=0, ipcPort=41809, storageInfo=lv=-57;cid=testClusterID;nsid=1636820393;c=1731883712565), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-17T22:48:51,897 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7ff0f915{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/java.io.tmpdir/jetty-localhost-39823-hadoop-hdfs-3_4_1-tests_jar-_-any-7547686252815015371/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:48:51,897 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@58a9274b{HTTP/1.1, (http/1.1)}{localhost:39823} 2024-11-17T22:48:51,897 INFO [Time-limited test {}] server.Server(415): Started @172921ms 2024-11-17T22:48:51,899 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T22:48:51,963 WARN [Thread-1445 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T22:48:51,966 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xabd1fc8a5f287ad6 with lease ID 0x7fab5858edf038f3: from storage DS-fb04d6aa-2540-43f4-a8b3-727c3095a7b6 node DatanodeRegistration(127.0.0.1:37627, datanodeUuid=493a7939-dbe6-4ff4-8045-f01018492982, infoPort=34249, infoSecurePort=0, ipcPort=45641, storageInfo=lv=-57;cid=testClusterID;nsid=1636820393;c=1731883712565), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:48:51,966 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xabd1fc8a5f287ad6 with lease ID 0x7fab5858edf038f3: from storage DS-6a39e09b-8b20-4ac5-807d-37e0d1612f35 node DatanodeRegistration(127.0.0.1:37627, datanodeUuid=493a7939-dbe6-4ff4-8045-f01018492982, infoPort=34249, infoSecurePort=0, ipcPort=45641, storageInfo=lv=-57;cid=testClusterID;nsid=1636820393;c=1731883712565), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:48:52,048 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:48:52,052 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:52,477 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:52,917 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-17T22:48:52,922 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-17T22:48:52,925 ERROR [FSHLog-0-hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8-prefix:1a6e40b21a48,45061,1731883713186 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36411,DS-fb04d6aa-2540-43f4-a8b3-727c3095a7b6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:52,925 WARN [FSHLog-0-hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8-prefix:1a6e40b21a48,45061,1731883713186 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36411,DS-fb04d6aa-2540-43f4-a8b3-727c3095a7b6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T22:48:52,925 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1a6e40b21a48%2C45061%2C1731883713186:(num 1731883727611) roll requested 2024-11-17T22:48:52,925 INFO [regionserver/1a6e40b21a48:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C45061%2C1731883713186.1731883732925 2024-11-17T22:48:52,932 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883727611 newFile=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883732925 2024-11-17T22:48:52,932 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:52,932 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:52,932 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:52,933 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:52,933 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:52,933 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883727611 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883732925 2024-11-17T22:48:52,933 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36411,DS-fb04d6aa-2540-43f4-a8b3-727c3095a7b6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:52,933 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36411,DS-fb04d6aa-2540-43f4-a8b3-727c3095a7b6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T22:48:52,933 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883727611 2024-11-17T22:48:52,934 WARN [IPC Server handler 4 on default port 45073 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883727611 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-17T22:48:52,934 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883727611 after 1ms 2024-11-17T22:48:52,934 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45755:45755),(127.0.0.1/127.0.0.1:34249:34249)] 2024-11-17T22:48:52,934 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883727611 is not closed yet, will try archiving it next time 2024-11-17T22:48:53,049 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:53,052 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:53,478 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:53,828 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-17T22:48:54,050 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:54,053 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:54,480 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:48:54,936 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C45061%2C1731883713186.1731883734936 2024-11-17T22:48:54,948 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883732925 newFile=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883734936 2024-11-17T22:48:54,949 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:54,949 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:54,949 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:54,949 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:54,950 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:54,950 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883732925 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883734936 2024-11-17T22:48:54,951 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34249:34249),(127.0.0.1/127.0.0.1:45755:45755)] 2024-11-17T22:48:54,951 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883727611 is not closed yet, will try archiving it next time 2024-11-17T22:48:54,952 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883732925 is not closed yet, will try archiving it next time 2024-11-17T22:48:54,952 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883713575 2024-11-17T22:48:54,952 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883713575 2024-11-17T22:48:54,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37627 is added to blk_1073741838_1019 (size=1264) 2024-11-17T22:48:54,953 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883713575 after 1ms 2024-11-17T22:48:54,953 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883713575 2024-11-17T22:48:54,953 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44953 is added to blk_1073741838_1019 (size=1264) 2024-11-17T22:48:54,954 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883727611 is not closed yet, will try archiving it next time 2024-11-17T22:48:54,966 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731883714599/Put/vlen=218/seqid=0] 2024-11-17T22:48:54,966 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731883724324/Put/vlen=1045/seqid=0] 2024-11-17T22:48:54,966 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883713575 2024-11-17T22:48:54,966 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883727611 2024-11-17T22:48:54,966 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883727611 2024-11-17T22:48:54,967 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883727611 after 1ms 2024-11-17T22:48:54,967 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883727611 2024-11-17T22:48:54,971 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731883727611/Put/vlen=1045/seqid=0] 2024-11-17T22:48:54,971 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731883729634/Put/vlen=1045/seqid=0] 2024-11-17T22:48:54,971 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883727611 2024-11-17T22:48:54,971 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883732925 2024-11-17T22:48:54,971 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883732925 2024-11-17T22:48:54,971 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883732925 after 0ms 2024-11-17T22:48:54,971 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL 
/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883732925 2024-11-17T22:48:54,975 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731883732924/Put/vlen=1045/seqid=0] 2024-11-17T22:48:54,975 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883734936 2024-11-17T22:48:54,975 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883734936 2024-11-17T22:48:54,975 WARN [IPC Server handler 3 on default port 45073 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883734936 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-17T22:48:54,976 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883734936 after 0ms 2024-11-17T22:48:55,050 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:55,054 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:55,481 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:55,967 WARN [ResponseProcessor for block BP-1180934907-172.17.0.2-1731883712565:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1180934907-172.17.0.2-1731883712565:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:55,967 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1182059241_22 at /127.0.0.1:48864 [Receiving block BP-1180934907-172.17.0.2-1731883712565:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:37627:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48864 dst: /127.0.0.1:37627 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:37627 remote=/127.0.0.1:48864]. Total timeout mills is 60000, 58981 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:48:55,967 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1182059241_22 at /127.0.0.1:46912 [Receiving block BP-1180934907-172.17.0.2-1731883712565:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:44953:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46912 dst: /127.0.0.1:44953 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T22:48:55,967 WARN [DataStreamer for file /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883734936 block BP-1180934907-172.17.0.2-1731883712565:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1180934907-172.17.0.2-1731883712565:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37627,DS-fb04d6aa-2540-43f4-a8b3-727c3095a7b6,DISK], DatanodeInfoWithStorage[127.0.0.1:44953,DS-e1e33aed-3ae6-482a-8418-a6945e5581ec,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37627,DS-fb04d6aa-2540-43f4-a8b3-727c3095a7b6,DISK]) is bad. 2024-11-17T22:48:55,972 WARN [DataStreamer for file /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883734936 block BP-1180934907-172.17.0.2-1731883712565:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1180934907-172.17.0.2-1731883712565:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:55,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37627 is added to blk_1073741839_1022 (size=85) 2024-11-17T22:48:55,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44953 is added to blk_1073741839_1022 (size=85) 2024-11-17T22:48:56,051 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:48:56,054 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:56,481 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:56,935 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883727611 after 4002ms 2024-11-17T22:48:57,052 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:57,055 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:57,482 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:58,052 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:58,055 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:58,483 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:58,977 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883734936 after 4002ms 2024-11-17T22:48:58,977 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883734936 2024-11-17T22:48:58,984 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883734936 2024-11-17T22:48:58,985 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 3e9e7bec8a35c756988960d0927bccb8 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-17T22:48:58,985 ERROR [FSHLog-0-hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8-prefix:1a6e40b21a48,45061,1731883713186 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1180934907-172.17.0.2-1731883712565:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T22:48:58,986 WARN [FSHLog-0-hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8-prefix:1a6e40b21a48,45061,1731883713186 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1180934907-172.17.0.2-1731883712565:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T22:48:58,987 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1a6e40b21a48%2C45061%2C1731883713186:(num 1731883734936) roll requested 2024-11-17T22:48:58,987 INFO [regionserver/1a6e40b21a48:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C45061%2C1731883713186.1731883738987 2024-11-17T22:48:58,993 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883734936 newFile=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883738987 2024-11-17T22:48:58,993 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:58,993 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:58,993 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:58,993 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:58,993 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:58,993 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883734936 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883738987 2024-11-17T22:48:58,993 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1180934907-172.17.0.2-1731883712565:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:58,994 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1180934907-172.17.0.2-1731883712565:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:58,994 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883734936 2024-11-17T22:48:58,994 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883734936 after 0ms 2024-11-17T22:48:58,994 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45755:45755),(127.0.0.1/127.0.0.1:34249:34249)] 2024-11-17T22:48:58,995 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.1731883734936 to hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/oldWALs/1a6e40b21a48%2C45061%2C1731883713186.1731883734936 2024-11-17T22:48:59,009 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/data/default/TestLogRolling-testLogRollOnPipelineRestart/3e9e7bec8a35c756988960d0927bccb8/.tmp/info/8f98afe72b574c9889b07fe03d4b214d is 1080, key is row1002/info:/1731883724324/Put/seqid=0 2024-11-17T22:48:59,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37627 is added to blk_1073741841_1024 (size=9270) 2024-11-17T22:48:59,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44953 is added to blk_1073741841_1024 (size=9270) 2024-11-17T22:48:59,014 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/data/default/TestLogRolling-testLogRollOnPipelineRestart/3e9e7bec8a35c756988960d0927bccb8/.tmp/info/8f98afe72b574c9889b07fe03d4b214d 2024-11-17T22:48:59,020 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/data/default/TestLogRolling-testLogRollOnPipelineRestart/3e9e7bec8a35c756988960d0927bccb8/.tmp/info/8f98afe72b574c9889b07fe03d4b214d as hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/data/default/TestLogRolling-testLogRollOnPipelineRestart/3e9e7bec8a35c756988960d0927bccb8/info/8f98afe72b574c9889b07fe03d4b214d 2024-11-17T22:48:59,025 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/data/default/TestLogRolling-testLogRollOnPipelineRestart/3e9e7bec8a35c756988960d0927bccb8/info/8f98afe72b574c9889b07fe03d4b214d, entries=4, sequenceid=8, filesize=9.1 K 2024-11-17T22:48:59,027 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 3e9e7bec8a35c756988960d0927bccb8 in 42ms, sequenceid=8, compaction requested=false 2024-11-17T22:48:59,027 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 3e9e7bec8a35c756988960d0927bccb8: 2024-11-17T22:48:59,027 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-17T22:48:59,027 ERROR [FSHLog-0-hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8-prefix:1a6e40b21a48,45061,1731883713186.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46613,DS-e1e33aed-3ae6-482a-8418-a6945e5581ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:59,027 WARN [FSHLog-0-hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8-prefix:1a6e40b21a48,45061,1731883713186.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46613,DS-e1e33aed-3ae6-482a-8418-a6945e5581ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
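The long run of repeated jdk.proxy2.$Proxy47 / GeneratedMethodAccessor113 / Method.invoke / HFileSystem$1.invoke frames in the updateBlockForPipeline trace above is the stack signature of nested JDK dynamic proxies: each wrapping layer routes the call through its own InvocationHandler before delegating to the next proxy. A minimal sketch of that mechanism in plain JDK reflection (the NamenodeProtocol interface and wrap() helper below are hypothetical stand-ins, not HBase's actual HFileSystem interceptor):

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Proxy;

public class NestedProxySketch {
    // Hypothetical stand-in for the HDFS ClientProtocol method seen in the trace.
    interface NamenodeProtocol {
        String updateBlockForPipeline(String blockId);
    }

    // A pass-through invocation handler, the same shape as HFileSystem$1.invoke:
    // it does nothing but forward the reflective call to the next layer.
    static NamenodeProtocol wrap(NamenodeProtocol delegate) {
        InvocationHandler handler = (proxy, method, args) -> method.invoke(delegate, args);
        return (NamenodeProtocol) Proxy.newProxyInstance(
                NamenodeProtocol.class.getClassLoader(),
                new Class<?>[] { NamenodeProtocol.class },
                handler);
    }

    public static void main(String[] args) {
        NamenodeProtocol base = blockId -> {
            // Dump the stack: each wrap() layer contributes a
            // $ProxyN -> Method.invoke -> handler frame group, much like the log above.
            new Exception("stack at innermost call").printStackTrace();
            return "updated " + blockId;
        };
        NamenodeProtocol layered = wrap(wrap(wrap(base)));
        System.out.println(layered.updateBlockForPipeline("blk_example"));
    }
}

In the trace above, the visible layers are the Hadoop retry proxy ($Proxy46 via RetryInvocationHandler) and the HFileSystem wrapper ($Proxy47), which appears several times in a row.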
2024-11-17T22:48:59,027 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1a6e40b21a48%2C45061%2C1731883713186.meta:.meta(num 1731883714103) roll requested 2024-11-17T22:48:59,028 INFO [regionserver/1a6e40b21a48:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C45061%2C1731883713186.meta.1731883739028.meta 2024-11-17T22:48:59,033 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:59,033 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:59,033 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:59,033 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:59,034 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:59,034 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.meta.1731883714103.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.meta.1731883739028.meta 2024-11-17T22:48:59,034 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46613,DS-e1e33aed-3ae6-482a-8418-a6945e5581ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:48:59,034 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46613,DS-e1e33aed-3ae6-482a-8418-a6945e5581ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
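The RecoverLeaseFSUtils records in this stretch ("Recovered lease, attempt=0 ... after 0ms" above, "Failed to recover lease, attempt=0 ... Lease recovery is in progress" just below, and eventually "Recovered lease, attempt=1 ... after 4003ms") follow a poll loop over the HDFS client's recoverLease/isFileClosed calls. A hedged sketch of that pattern, not the actual RecoverLeaseFSUtils implementation; the attempt cap and 4-second back-off are placeholder values:

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
    // Ask the NameNode to recover the WAL's lease, then poll until the file is closed.
    static void recoverLease(DistributedFileSystem dfs, Path wal)
            throws IOException, InterruptedException {
        for (int attempt = 0; attempt < 10; attempt++) {
            // recoverLease() returns true once recovery has completed and the file is closed.
            if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
                System.out.println("Recovered lease, attempt=" + attempt + " on file=" + wal);
                return;
            }
            System.out.println("Failed to recover lease, attempt=" + attempt + " on file=" + wal);
            Thread.sleep(4000L); // placeholder back-off, roughly the ~4s gap between attempts logged here
        }
        throw new IOException("Could not recover lease on " + wal);
    }
}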
2024-11-17T22:48:59,034 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.meta.1731883714103.meta 2024-11-17T22:48:59,034 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45755:45755),(127.0.0.1/127.0.0.1:34249:34249)] 2024-11-17T22:48:59,034 DEBUG [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.meta.1731883714103.meta is not closed yet, will try archiving it next time 2024-11-17T22:48:59,035 WARN [IPC Server handler 4 on default port 45073 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.meta.1731883714103.meta has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741834_1013 2024-11-17T22:48:59,035 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.meta.1731883714103.meta after 1ms 2024-11-17T22:48:59,050 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/data/hbase/meta/1588230740/.tmp/info/f440ffb96f78400c9fa87f48df7ececf is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731883714226.3e9e7bec8a35c756988960d0927bccb8./info:regioninfo/1731883714604/Put/seqid=0 2024-11-17T22:48:59,053 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:48:59,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37627 is added to blk_1073741843_1027 (size=7125) 2024-11-17T22:48:59,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44953 is added to blk_1073741843_1027 (size=7125) 2024-11-17T22:48:59,056 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:48:59,056 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/data/hbase/meta/1588230740/.tmp/info/f440ffb96f78400c9fa87f48df7ececf 2024-11-17T22:48:59,074 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/data/hbase/meta/1588230740/.tmp/ns/7c9b56201a22433ea8eb43d504ca94d8 is 43, key is default/ns:d/1731883714145/Put/seqid=0 2024-11-17T22:48:59,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44953 is added to blk_1073741844_1028 (size=5153) 2024-11-17T22:48:59,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37627 is added to blk_1073741844_1028 (size=5153) 2024-11-17T22:48:59,080 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/data/hbase/meta/1588230740/.tmp/ns/7c9b56201a22433ea8eb43d504ca94d8 2024-11-17T22:48:59,098 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/data/hbase/meta/1588230740/.tmp/table/9aa39b5498c44a31b11d648cc0f794a6 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731883714615/Put/seqid=0 2024-11-17T22:48:59,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44953 is added to blk_1073741845_1029 (size=5438) 2024-11-17T22:48:59,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37627 is added to blk_1073741845_1029 (size=5438) 2024-11-17T22:48:59,102 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/data/hbase/meta/1588230740/.tmp/table/9aa39b5498c44a31b11d648cc0f794a6 2024-11-17T22:48:59,108 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/data/hbase/meta/1588230740/.tmp/info/f440ffb96f78400c9fa87f48df7ececf as hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/data/hbase/meta/1588230740/info/f440ffb96f78400c9fa87f48df7ececf 2024-11-17T22:48:59,113 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/data/hbase/meta/1588230740/info/f440ffb96f78400c9fa87f48df7ececf, entries=10, sequenceid=11, filesize=7.0 K 2024-11-17T22:48:59,114 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/data/hbase/meta/1588230740/.tmp/ns/7c9b56201a22433ea8eb43d504ca94d8 as hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/data/hbase/meta/1588230740/ns/7c9b56201a22433ea8eb43d504ca94d8 2024-11-17T22:48:59,120 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/data/hbase/meta/1588230740/ns/7c9b56201a22433ea8eb43d504ca94d8, entries=2, sequenceid=11, filesize=5.0 K 2024-11-17T22:48:59,121 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/data/hbase/meta/1588230740/.tmp/table/9aa39b5498c44a31b11d648cc0f794a6 as hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/data/hbase/meta/1588230740/table/9aa39b5498c44a31b11d648cc0f794a6 2024-11-17T22:48:59,127 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/data/hbase/meta/1588230740/table/9aa39b5498c44a31b11d648cc0f794a6, entries=2, sequenceid=11, filesize=5.3 K 2024-11-17T22:48:59,129 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 102ms, sequenceid=11, compaction requested=false 2024-11-17T22:48:59,129 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-17T22:48:59,133 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-17T22:48:59,134 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-17T22:48:59,134 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at 
org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T22:48:59,134 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:48:59,134 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:48:59,134 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-17T22:48:59,134 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-17T22:48:59,134 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=960077611, stopped=false 2024-11-17T22:48:59,134 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=1a6e40b21a48,38529,1731883713143 2024-11-17T22:48:59,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45061-0x1004fdf9ed20001, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T22:48:59,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38529-0x1004fdf9ed20000, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T22:48:59,135 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T22:48:59,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45061-0x1004fdf9ed20001, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:48:59,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38529-0x1004fdf9ed20000, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:48:59,135 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
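The "Committing .../.tmp/... as .../info/..." and "Added ..., entries=..., filesize=..." records above show the flush publishing pattern: the flusher writes the new store file under a temporary directory and only then moves it into the column-family directory, so readers never see a partially written file. A minimal write-then-rename sketch on a Hadoop FileSystem (the directory layout and payload are placeholders, not the test's real HFile writer):

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommitSketch {
    // Write a file under .tmp, then publish it by renaming into the live
    // directory -- the same shape as the HRegionFileSystem commit step logged above.
    static Path writeAndCommit(FileSystem fs, Path regionDir, String family, String fileName, byte[] payload)
            throws IOException {
        Path tmpFile = new Path(new Path(regionDir, ".tmp/" + family), fileName);
        Path liveFile = new Path(new Path(regionDir, family), fileName);
        fs.mkdirs(tmpFile.getParent());
        try (FSDataOutputStream out = fs.create(tmpFile, true)) {
            out.write(payload); // stand-in for the real HFile writer
        }
        fs.mkdirs(liveFile.getParent());
        if (!fs.rename(tmpFile, liveFile)) { // publish: readers only ever see complete files
            throw new IOException("failed to commit " + tmpFile + " as " + liveFile);
        }
        return liveFile;
    }
}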
2024-11-17T22:48:59,136 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T22:48:59,136 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:48:59,136 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '1a6e40b21a48,45061,1731883713186' ***** 2024-11-17T22:48:59,136 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-17T22:48:59,136 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38529-0x1004fdf9ed20000, quorum=127.0.0.1:58964, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T22:48:59,136 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45061-0x1004fdf9ed20001, quorum=127.0.0.1:58964, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T22:48:59,136 INFO [RS:0;1a6e40b21a48:45061 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-17T22:48:59,136 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-17T22:48:59,136 INFO [RS:0;1a6e40b21a48:45061 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-17T22:48:59,136 INFO [RS:0;1a6e40b21a48:45061 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-17T22:48:59,137 INFO [RS:0;1a6e40b21a48:45061 {}] regionserver.HRegionServer(3091): Received CLOSE for 3e9e7bec8a35c756988960d0927bccb8 2024-11-17T22:48:59,137 INFO [RS:0;1a6e40b21a48:45061 {}] regionserver.HRegionServer(959): stopping server 1a6e40b21a48,45061,1731883713186 2024-11-17T22:48:59,137 INFO [RS:0;1a6e40b21a48:45061 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T22:48:59,137 INFO [RS:0;1a6e40b21a48:45061 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;1a6e40b21a48:45061. 2024-11-17T22:48:59,137 DEBUG [RS:0;1a6e40b21a48:45061 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T22:48:59,137 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 3e9e7bec8a35c756988960d0927bccb8, disabling compactions & flushes 2024-11-17T22:48:59,137 DEBUG [RS:0;1a6e40b21a48:45061 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:48:59,137 INFO 
[RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731883714226.3e9e7bec8a35c756988960d0927bccb8. 2024-11-17T22:48:59,137 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731883714226.3e9e7bec8a35c756988960d0927bccb8. 2024-11-17T22:48:59,137 INFO [RS:0;1a6e40b21a48:45061 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-17T22:48:59,137 INFO [RS:0;1a6e40b21a48:45061 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-17T22:48:59,137 INFO [RS:0;1a6e40b21a48:45061 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-17T22:48:59,137 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731883714226.3e9e7bec8a35c756988960d0927bccb8. after waiting 0 ms 2024-11-17T22:48:59,137 INFO [RS:0;1a6e40b21a48:45061 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-17T22:48:59,137 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731883714226.3e9e7bec8a35c756988960d0927bccb8. 2024-11-17T22:48:59,137 INFO [RS:0;1a6e40b21a48:45061 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-17T22:48:59,137 DEBUG [RS:0;1a6e40b21a48:45061 {}] regionserver.HRegionServer(1325): Online Regions={3e9e7bec8a35c756988960d0927bccb8=TestLogRolling-testLogRollOnPipelineRestart,,1731883714226.3e9e7bec8a35c756988960d0927bccb8., 1588230740=hbase:meta,,1.1588230740} 2024-11-17T22:48:59,137 DEBUG [RS:0;1a6e40b21a48:45061 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 3e9e7bec8a35c756988960d0927bccb8 2024-11-17T22:48:59,137 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T22:48:59,137 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T22:48:59,138 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T22:48:59,138 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T22:48:59,138 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T22:48:59,141 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-17T22:48:59,141 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/data/default/TestLogRolling-testLogRollOnPipelineRestart/3e9e7bec8a35c756988960d0927bccb8/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-17T22:48:59,142 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T22:48:59,142 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T22:48:59,142 INFO [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731883714226.3e9e7bec8a35c756988960d0927bccb8. 2024-11-17T22:48:59,142 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 3e9e7bec8a35c756988960d0927bccb8: Waiting for close lock at 1731883739137Running coprocessor pre-close hooks at 1731883739137Disabling compacts and flushes for region at 1731883739137Disabling writes for close at 1731883739137Writing region close event to WAL at 1731883739138 (+1 ms)Running coprocessor post-close hooks at 1731883739142 (+4 ms)Closed at 1731883739142 2024-11-17T22:48:59,142 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731883739137Running coprocessor pre-close hooks at 1731883739137Disabling compacts and flushes for region at 1731883739137Disabling writes for close at 1731883739138 (+1 ms)Writing region close event to WAL at 1731883739139 (+1 ms)Running coprocessor post-close hooks at 1731883739142 (+3 ms)Closed at 1731883739142 2024-11-17T22:48:59,142 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731883714226.3e9e7bec8a35c756988960d0927bccb8. 2024-11-17T22:48:59,142 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-17T22:48:59,338 INFO [RS:0;1a6e40b21a48:45061 {}] regionserver.HRegionServer(976): stopping server 1a6e40b21a48,45061,1731883713186; all regions closed. 
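The ZooKeeper events above (NodeDeleted for /hbase/running, followed by "Set watcher on znode that does not yet exist") are how the shutdown request propagates: removing the /hbase/running znode is treated as the cluster-wide stop signal, and each server keeps a watch on it. A small sketch of such a watch using the plain ZooKeeper client API (the quorum address, timeout, and latch-based wait are placeholders, not the test's 127.0.0.1:58964 ensemble or HBase's ZKWatcher):

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningNodeWatchSketch {
    public static void main(String[] args) throws Exception {
        CountDownLatch shutdown = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, (WatchedEvent event) -> {
            // Deletion of /hbase/running is the event that produces the
            // NodeDeleted lines logged above.
            if (event.getType() == Watcher.Event.EventType.NodeDeleted
                    && "/hbase/running".equals(event.getPath())) {
                shutdown.countDown();
            }
        });
        // Set (or re-set) the watch; returns null if the znode does not exist yet,
        // matching the "Set watcher on znode that does not yet exist" lines.
        zk.exists("/hbase/running", true);
        shutdown.await();
        System.out.println("shutdown requested");
        zk.close();
    }
}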
2024-11-17T22:48:59,339 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:59,340 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:59,340 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:59,340 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:59,341 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:48:59,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37627 is added to blk_1073741842_1025 (size=825) 2024-11-17T22:48:59,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44953 is added to blk_1073741842_1025 (size=825) 2024-11-17T22:48:59,441 INFO [regionserver/1a6e40b21a48:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-17T22:48:59,441 INFO [regionserver/1a6e40b21a48:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-17T22:48:59,443 INFO [regionserver/1a6e40b21a48:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T22:48:59,483 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:49:00,054 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:00,056 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:00,485 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:49:01,055 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:01,057 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:01,486 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:49:02,057 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:02,059 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:02,487 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:49:02,969 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1013: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-17T22:49:03,037 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.meta.1731883714103.meta after 4003ms 2024-11-17T22:49:03,038 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/WALs/1a6e40b21a48,45061,1731883713186/1a6e40b21a48%2C45061%2C1731883713186.meta.1731883714103.meta to hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/oldWALs/1a6e40b21a48%2C45061%2C1731883713186.meta.1731883714103.meta 2024-11-17T22:49:03,047 DEBUG [RS:0;1a6e40b21a48:45061 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/oldWALs 2024-11-17T22:49:03,047 INFO [RS:0;1a6e40b21a48:45061 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1a6e40b21a48%2C45061%2C1731883713186.meta:.meta(num 1731883739028) 2024-11-17T22:49:03,048 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:49:03,048 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:49:03,048 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:49:03,049 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:49:03,049 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:49:03,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37627 is added to blk_1073741840_1023 (size=1162) 2024-11-17T22:49:03,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44953 is added to blk_1073741840_1023 (size=1162) 2024-11-17T22:49:03,058 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:03,059 DEBUG [RS:0;1a6e40b21a48:45061 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/oldWALs 2024-11-17T22:49:03,059 INFO [RS:0;1a6e40b21a48:45061 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1a6e40b21a48%2C45061%2C1731883713186:(num 1731883738987) 2024-11-17T22:49:03,059 DEBUG [RS:0;1a6e40b21a48:45061 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:49:03,059 INFO [RS:0;1a6e40b21a48:45061 {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T22:49:03,059 INFO [RS:0;1a6e40b21a48:45061 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T22:49:03,059 INFO [RS:0;1a6e40b21a48:45061 {}] hbase.ChoreService(370): Chore service for: regionserver/1a6e40b21a48:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-17T22:49:03,060 INFO [RS:0;1a6e40b21a48:45061 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T22:49:03,060 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-17T22:49:03,060 INFO [RS:0;1a6e40b21a48:45061 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45061 2024-11-17T22:49:03,060 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:49:03,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38529-0x1004fdf9ed20000, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T22:49:03,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45061-0x1004fdf9ed20001, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1a6e40b21a48,45061,1731883713186 2024-11-17T22:49:03,062 INFO [RS:0;1a6e40b21a48:45061 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T22:49:03,063 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1a6e40b21a48,45061,1731883713186] 2024-11-17T22:49:03,064 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1a6e40b21a48,45061,1731883713186 already deleted, retry=false 2024-11-17T22:49:03,064 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1a6e40b21a48,45061,1731883713186 expired; onlineServers=0 2024-11-17T22:49:03,064 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '1a6e40b21a48,38529,1731883713143' ***** 2024-11-17T22:49:03,064 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-17T22:49:03,064 INFO [M:0;1a6e40b21a48:38529 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T22:49:03,064 INFO [M:0;1a6e40b21a48:38529 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T22:49:03,064 DEBUG [M:0;1a6e40b21a48:38529 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-17T22:49:03,064 DEBUG [M:0;1a6e40b21a48:38529 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-17T22:49:03,064 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-17T22:49:03,064 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.large.0-1731883713330 {}] cleaner.HFileCleaner(306): Exit Thread[master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.large.0-1731883713330,5,FailOnTimeoutGroup] 2024-11-17T22:49:03,064 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.small.0-1731883713333 {}] cleaner.HFileCleaner(306): Exit Thread[master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.small.0-1731883713333,5,FailOnTimeoutGroup] 2024-11-17T22:49:03,064 INFO [M:0;1a6e40b21a48:38529 {}] hbase.ChoreService(370): Chore service for: master/1a6e40b21a48:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-17T22:49:03,064 INFO [M:0;1a6e40b21a48:38529 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T22:49:03,065 DEBUG [M:0;1a6e40b21a48:38529 {}] master.HMaster(1795): Stopping service threads 2024-11-17T22:49:03,065 INFO [M:0;1a6e40b21a48:38529 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-17T22:49:03,065 INFO [M:0;1a6e40b21a48:38529 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T22:49:03,065 INFO [M:0;1a6e40b21a48:38529 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-17T22:49:03,065 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-17T22:49:03,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38529-0x1004fdf9ed20000, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-17T22:49:03,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38529-0x1004fdf9ed20000, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:49:03,065 DEBUG [M:0;1a6e40b21a48:38529 {}] zookeeper.ZKUtil(347): master:38529-0x1004fdf9ed20000, quorum=127.0.0.1:58964, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-17T22:49:03,065 WARN [M:0;1a6e40b21a48:38529 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-17T22:49:03,066 INFO [M:0;1a6e40b21a48:38529 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/.lastflushedseqids 2024-11-17T22:49:03,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37627 is added to blk_1073741846_1030 (size=130) 2024-11-17T22:49:03,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44953 is added to blk_1073741846_1030 (size=130) 2024-11-17T22:49:03,072 INFO [M:0;1a6e40b21a48:38529 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-17T22:49:03,072 INFO [M:0;1a6e40b21a48:38529 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-17T22:49:03,072 DEBUG [M:0;1a6e40b21a48:38529 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T22:49:03,072 INFO [M:0;1a6e40b21a48:38529 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:49:03,072 DEBUG [M:0;1a6e40b21a48:38529 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:49:03,072 DEBUG [M:0;1a6e40b21a48:38529 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T22:49:03,072 DEBUG [M:0;1a6e40b21a48:38529 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:49:03,072 INFO [M:0;1a6e40b21a48:38529 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.17 KB heapSize=29.16 KB 2024-11-17T22:49:03,073 ERROR [FSHLog-0-hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData-prefix:1a6e40b21a48,38529,1731883713143 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46613,DS-e1e33aed-3ae6-482a-8418-a6945e5581ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:49:03,073 WARN [FSHLog-0-hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData-prefix:1a6e40b21a48,38529,1731883713143 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46613,DS-e1e33aed-3ae6-482a-8418-a6945e5581ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:49:03,073 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 1a6e40b21a48%2C38529%2C1731883713143:(num 1731883713269) roll requested 2024-11-17T22:49:03,073 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C38529%2C1731883713143.1731883743073 2024-11-17T22:49:03,078 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:49:03,078 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:49:03,078 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:49:03,078 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:49:03,078 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:49:03,078 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/WALs/1a6e40b21a48,38529,1731883713143/1a6e40b21a48%2C38529%2C1731883713143.1731883713269 with entries=53, filesize=26.62 KB; new WAL /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/WALs/1a6e40b21a48,38529,1731883713143/1a6e40b21a48%2C38529%2C1731883713143.1731883743073 2024-11-17T22:49:03,080 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46613,DS-e1e33aed-3ae6-482a-8418-a6945e5581ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:49:03,080 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46613,DS-e1e33aed-3ae6-482a-8418-a6945e5581ec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T22:49:03,080 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/WALs/1a6e40b21a48,38529,1731883713143/1a6e40b21a48%2C38529%2C1731883713143.1731883713269 2024-11-17T22:49:03,081 WARN [IPC Server handler 2 on default port 45073 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/WALs/1a6e40b21a48,38529,1731883713143/1a6e40b21a48%2C38529%2C1731883713143.1731883713269 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-11-17T22:49:03,081 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/WALs/1a6e40b21a48,38529,1731883713143/1a6e40b21a48%2C38529%2C1731883713143.1731883713269 after 1ms 2024-11-17T22:49:03,084 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34249:34249),(127.0.0.1/127.0.0.1:45755:45755)] 2024-11-17T22:49:03,084 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/WALs/1a6e40b21a48,38529,1731883713143/1a6e40b21a48%2C38529%2C1731883713143.1731883713269 is not closed yet, will try archiving it next time 2024-11-17T22:49:03,099 DEBUG [M:0;1a6e40b21a48:38529 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6c8d2b5a81ae49039edffa783592aaff is 82, key is hbase:meta,,1/info:regioninfo/1731883714131/Put/seqid=0 2024-11-17T22:49:03,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37627 is added to blk_1073741848_1033 (size=5672) 2024-11-17T22:49:03,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44953 is added to blk_1073741848_1033 (size=5672) 2024-11-17T22:49:03,104 INFO [M:0;1a6e40b21a48:38529 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6c8d2b5a81ae49039edffa783592aaff 2024-11-17T22:49:03,121 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 
or 3.3.4, See HBASE-27595 for details. 2024-11-17T22:49:03,124 DEBUG [M:0;1a6e40b21a48:38529 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fcc80180ff794ed78f8f06daa46a7d62 is 778, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731883714620/Put/seqid=0 2024-11-17T22:49:03,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37627 is added to blk_1073741849_1034 (size=6118) 2024-11-17T22:49:03,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44953 is added to blk_1073741849_1034 (size=6118) 2024-11-17T22:49:03,130 INFO [M:0;1a6e40b21a48:38529 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fcc80180ff794ed78f8f06daa46a7d62 2024-11-17T22:49:03,150 DEBUG [M:0;1a6e40b21a48:38529 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/386dfde1e2db4a1a87db043a465c3f86 is 69, key is 1a6e40b21a48,45061,1731883713186/rs:state/1731883713423/Put/seqid=0 2024-11-17T22:49:03,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37627 is added to blk_1073741850_1035 (size=5156) 2024-11-17T22:49:03,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44953 is added to blk_1073741850_1035 (size=5156) 2024-11-17T22:49:03,155 INFO [M:0;1a6e40b21a48:38529 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/386dfde1e2db4a1a87db043a465c3f86 2024-11-17T22:49:03,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45061-0x1004fdf9ed20001, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T22:49:03,163 INFO [RS:0;1a6e40b21a48:45061 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T22:49:03,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45061-0x1004fdf9ed20001, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T22:49:03,163 INFO [RS:0;1a6e40b21a48:45061 {}] regionserver.HRegionServer(1031): Exiting; stopping=1a6e40b21a48,45061,1731883713186; zookeeper connection closed. 
2024-11-17T22:49:03,163 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@e4056ad {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@e4056ad 2024-11-17T22:49:03,163 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-17T22:49:03,174 DEBUG [M:0;1a6e40b21a48:38529 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9b0288164b3749acb0833c448e50d908 is 52, key is load_balancer_on/state:d/1731883714219/Put/seqid=0 2024-11-17T22:49:03,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37627 is added to blk_1073741851_1036 (size=5056) 2024-11-17T22:49:03,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44953 is added to blk_1073741851_1036 (size=5056) 2024-11-17T22:49:03,180 INFO [M:0;1a6e40b21a48:38529 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9b0288164b3749acb0833c448e50d908 2024-11-17T22:49:03,185 DEBUG [M:0;1a6e40b21a48:38529 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6c8d2b5a81ae49039edffa783592aaff as hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6c8d2b5a81ae49039edffa783592aaff 2024-11-17T22:49:03,190 INFO [M:0;1a6e40b21a48:38529 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6c8d2b5a81ae49039edffa783592aaff, entries=8, sequenceid=56, filesize=5.5 K 2024-11-17T22:49:03,192 DEBUG [M:0;1a6e40b21a48:38529 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fcc80180ff794ed78f8f06daa46a7d62 as hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/fcc80180ff794ed78f8f06daa46a7d62 2024-11-17T22:49:03,197 INFO [M:0;1a6e40b21a48:38529 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/fcc80180ff794ed78f8f06daa46a7d62, entries=6, sequenceid=56, filesize=6.0 K 2024-11-17T22:49:03,198 DEBUG [M:0;1a6e40b21a48:38529 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/386dfde1e2db4a1a87db043a465c3f86 as hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/386dfde1e2db4a1a87db043a465c3f86 
2024-11-17T22:49:03,203 INFO [M:0;1a6e40b21a48:38529 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/386dfde1e2db4a1a87db043a465c3f86, entries=1, sequenceid=56, filesize=5.0 K 2024-11-17T22:49:03,204 DEBUG [M:0;1a6e40b21a48:38529 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9b0288164b3749acb0833c448e50d908 as hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9b0288164b3749acb0833c448e50d908 2024-11-17T22:49:03,208 INFO [M:0;1a6e40b21a48:38529 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9b0288164b3749acb0833c448e50d908, entries=1, sequenceid=56, filesize=4.9 K 2024-11-17T22:49:03,209 INFO [M:0;1a6e40b21a48:38529 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 137ms, sequenceid=56, compaction requested=false 2024-11-17T22:49:03,210 INFO [M:0;1a6e40b21a48:38529 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:49:03,210 DEBUG [M:0;1a6e40b21a48:38529 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731883743072Disabling compacts and flushes for region at 1731883743072Disabling writes for close at 1731883743072Obtaining lock to block concurrent updates at 1731883743072Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731883743072Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23726, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1731883743073 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731883743085 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731883743085Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731883743099 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731883743099Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731883743108 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731883743124 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731883743124Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731883743135 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731883743149 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731883743149Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731883743159 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731883743174 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731883743174Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4546a5e7: reopening flushed file at 1731883743184 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@564c53dd: reopening flushed file at 1731883743191 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d7b1c62: reopening flushed file at 1731883743198 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@13e37065: reopening flushed file at 1731883743203 (+5 ms)Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 137ms, sequenceid=56, compaction requested=false at 1731883743209 (+6 ms)Writing region close event to WAL at 1731883743210 (+1 ms)Closed at 1731883743210 2024-11-17T22:49:03,211 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:49:03,211 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:49:03,211 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:49:03,211 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:49:03,211 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:49:03,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44953 is added to blk_1073741847_1031 (size=757) 2024-11-17T22:49:03,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37627 is added to blk_1073741847_1031 (size=757) 2024-11-17T22:49:03,488 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:04,059 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:04,061 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:04,143 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:04,144 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:04,165 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:04,165 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:04,166 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:04,166 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:04,166 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:04,166 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:04,169 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:04,169 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:04,169 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:04,171 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:04,175 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:04,175 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:04,489 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:04,678 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-17T22:49:04,680 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:04,680 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:04,681 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:04,681 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:04,703 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:04,703 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:04,703 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:04,704 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:04,704 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:04,704 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:04,707 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:04,708 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:04,708 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:04,710 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:04,902 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T22:49:04,903 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-17T22:49:04,903 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-17T22:49:04,903 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-17T22:49:05,060 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:05,061 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:05,489 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:05,971 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-17T22:49:06,061 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:06,062 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:06,491 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:49:07,062 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:07,063 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:07,082 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/WALs/1a6e40b21a48,38529,1731883713143/1a6e40b21a48%2C38529%2C1731883713143.1731883713269 after 4002ms 2024-11-17T22:49:07,084 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/WALs/1a6e40b21a48,38529,1731883713143/1a6e40b21a48%2C38529%2C1731883713143.1731883713269 to hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/oldWALs/1a6e40b21a48%2C38529%2C1731883713143.1731883713269 2024-11-17T22:49:07,090 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/MasterData/oldWALs/1a6e40b21a48%2C38529%2C1731883713143.1731883713269 to hdfs://localhost:45073/user/jenkins/test-data/27e0d608-b807-b52f-cfb5-d44312c8f4c8/oldWALs/1a6e40b21a48%2C38529%2C1731883713143.1731883713269$masterlocalwal$ 2024-11-17T22:49:07,091 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-17T22:49:07,091 INFO [M:0;1a6e40b21a48:38529 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
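The repeated "Failed invocation" warnings above, and the "Recovered lease, attempt=1 ... after 4002ms" line that follows, are two sides of the same recover-then-poll pattern: RecoverLeaseFSUtils asks the NameNode to recover the WAL's lease and then probes isFileClosed() (reflectively, which is why each failure surfaces as an InvocationTargetException) until the file is closed; when the underlying DFSClient has already been shut down, every probe fails with "Filesystem closed". The sketch below is a simplified illustration of that loop, not the actual RecoverLeaseFSUtils implementation referenced in the stack traces; the class name, timeout handling, and one-second pause are invented for the example.

```java
// Simplified sketch of the recover-then-poll pattern visible in the entries above.
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  /** Returns true once the WAL file's lease is recovered or the file is already closed. */
  public static boolean recoverLease(FileSystem fs, Path wal, long timeoutMs)
      throws InterruptedException {
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // nothing to recover on a non-HDFS filesystem
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
          return true; // corresponds to "Recovered lease, attempt=N on file=..."
        }
      } catch (IOException e) {
        // "java.io.IOException: Filesystem closed" lands here once the DFSClient backing
        // this FileSystem has been shut down; the utility in the log reports it as
        // "Failed invocation for <WAL path>" and keeps retrying.
      }
      Thread.sleep(1000L); // the real code uses configurable pauses between attempts
    }
    return false;
  }
}
```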
2024-11-17T22:49:07,091 INFO [M:0;1a6e40b21a48:38529 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38529 2024-11-17T22:49:07,091 INFO [M:0;1a6e40b21a48:38529 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T22:49:07,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38529-0x1004fdf9ed20000, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T22:49:07,192 INFO [M:0;1a6e40b21a48:38529 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T22:49:07,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38529-0x1004fdf9ed20000, quorum=127.0.0.1:58964, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T22:49:07,198 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7ff0f915{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:49:07,199 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@58a9274b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T22:49:07,200 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T22:49:07,200 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@17312068{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T22:49:07,200 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ede944f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/hadoop.log.dir/,STOPPED} 2024-11-17T22:49:07,202 WARN [BP-1180934907-172.17.0.2-1731883712565 heartbeating to localhost/127.0.0.1:45073 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T22:49:07,202 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T22:49:07,202 WARN [BP-1180934907-172.17.0.2-1731883712565 heartbeating to localhost/127.0.0.1:45073 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1180934907-172.17.0.2-1731883712565 (Datanode Uuid 493a7939-dbe6-4ff4-8045-f01018492982) service to localhost/127.0.0.1:45073 2024-11-17T22:49:07,202 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T22:49:07,203 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/cluster_4b11f703-f0de-e5d6-1f18-b51e75fa87df/data/data3/current/BP-1180934907-172.17.0.2-1731883712565 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:49:07,204 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/cluster_4b11f703-f0de-e5d6-1f18-b51e75fa87df/data/data4/current/BP-1180934907-172.17.0.2-1731883712565 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:49:07,204 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T22:49:07,205 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@45604664{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:49:07,206 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6e793ffb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T22:49:07,206 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T22:49:07,206 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@571de0fd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T22:49:07,206 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75f58649{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/hadoop.log.dir/,STOPPED} 2024-11-17T22:49:07,207 WARN [BP-1180934907-172.17.0.2-1731883712565 heartbeating to localhost/127.0.0.1:45073 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T22:49:07,207 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T22:49:07,207 WARN [BP-1180934907-172.17.0.2-1731883712565 heartbeating to localhost/127.0.0.1:45073 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1180934907-172.17.0.2-1731883712565 (Datanode Uuid a31c329b-8c82-4905-87f5-272d2bb81931) service to localhost/127.0.0.1:45073 2024-11-17T22:49:07,207 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T22:49:07,207 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/cluster_4b11f703-f0de-e5d6-1f18-b51e75fa87df/data/data1/current/BP-1180934907-172.17.0.2-1731883712565 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:49:07,207 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/cluster_4b11f703-f0de-e5d6-1f18-b51e75fa87df/data/data2/current/BP-1180934907-172.17.0.2-1731883712565 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:49:07,207 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T22:49:07,212 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7f6d993c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T22:49:07,212 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4ea918cd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T22:49:07,212 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T22:49:07,212 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10e56c5a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T22:49:07,213 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1c727387{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/hadoop.log.dir/,STOPPED} 2024-11-17T22:49:07,218 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-17T22:49:07,238 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-17T22:49:07,245 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=180 (was 155) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:45073 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:45073 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:45073 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45073 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45073 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:45073 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45073 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45073 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 434) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=69 (was 87), ProcessCount=11 (was 11), AvailableMemoryMB=4105 (was 4290) 2024-11-17T22:49:07,251 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=180, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=69, ProcessCount=11, AvailableMemoryMB=4105 2024-11-17T22:49:07,251 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-17T22:49:07,251 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/hadoop.log.dir so I do NOT create it in target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2 2024-11-17T22:49:07,251 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c228afe1-7ced-9b42-f96a-82760fb979b8/hadoop.tmp.dir so I do NOT create it in target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2 2024-11-17T22:49:07,251 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/cluster_8302175d-06ac-9ab8-268d-7043ad67e963, deleteOnExit=true 2024-11-17T22:49:07,251 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-17T22:49:07,252 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/test.cache.data in system properties and HBase conf 2024-11-17T22:49:07,252 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/hadoop.tmp.dir in system properties and HBase conf 2024-11-17T22:49:07,252 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/hadoop.log.dir in system properties and HBase conf 2024-11-17T22:49:07,252 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-17T22:49:07,252 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-17T22:49:07,252 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-17T22:49:07,252 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-17T22:49:07,252 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-17T22:49:07,252 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-17T22:49:07,252 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-17T22:49:07,252 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T22:49:07,252 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-17T22:49:07,252 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-17T22:49:07,253 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T22:49:07,253 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T22:49:07,253 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-17T22:49:07,253 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/nfs.dump.dir in system properties and HBase conf 2024-11-17T22:49:07,253 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/java.io.tmpdir in system properties and HBase conf 2024-11-17T22:49:07,253 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T22:49:07,253 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-17T22:49:07,253 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-17T22:49:07,266 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T22:49:07,309 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:49:07,314 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T22:49:07,315 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T22:49:07,315 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T22:49:07,315 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T22:49:07,316 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:49:07,316 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7638bdc6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/hadoop.log.dir/,AVAILABLE} 2024-11-17T22:49:07,316 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4761886e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T22:49:07,407 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@15cd018{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/java.io.tmpdir/jetty-localhost-40979-hadoop-hdfs-3_4_1-tests_jar-_-any-9475292526398092126/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T22:49:07,408 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@35b0b5b4{HTTP/1.1, (http/1.1)}{localhost:40979} 2024-11-17T22:49:07,408 INFO [Time-limited test {}] server.Server(415): Started @188432ms 2024-11-17T22:49:07,419 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T22:49:07,465 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:49:07,468 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T22:49:07,469 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T22:49:07,469 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T22:49:07,469 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T22:49:07,470 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a5f2a0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/hadoop.log.dir/,AVAILABLE} 2024-11-17T22:49:07,470 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47ce5971{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T22:49:07,492 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:07,562 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3b4ea813{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/java.io.tmpdir/jetty-localhost-39517-hadoop-hdfs-3_4_1-tests_jar-_-any-13837792687933431965/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:49:07,562 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@30c71845{HTTP/1.1, (http/1.1)}{localhost:39517} 2024-11-17T22:49:07,562 INFO [Time-limited test {}] server.Server(415): Started @188586ms 2024-11-17T22:49:07,563 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T22:49:07,587 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:49:07,590 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T22:49:07,591 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T22:49:07,591 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T22:49:07,591 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T22:49:07,591 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@67c2b9b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/hadoop.log.dir/,AVAILABLE} 2024-11-17T22:49:07,591 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3fdc15a6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T22:49:07,617 WARN [Thread-1639 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/cluster_8302175d-06ac-9ab8-268d-7043ad67e963/data/data1/current/BP-1985597313-172.17.0.2-1731883747274/current, will proceed with Du for space computation calculation, 2024-11-17T22:49:07,617 WARN [Thread-1640 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/cluster_8302175d-06ac-9ab8-268d-7043ad67e963/data/data2/current/BP-1985597313-172.17.0.2-1731883747274/current, will proceed with Du for space computation calculation, 2024-11-17T22:49:07,632 WARN [Thread-1618 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T22:49:07,634 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x163db07ff5c0cd17 with lease ID 0xafc0e8df8471989b: Processing first storage report for DS-eef4ffc4-f5e0-4c6a-b249-ae2592e80c1e from datanode DatanodeRegistration(127.0.0.1:46877, datanodeUuid=c9a30852-542f-466a-859b-89763d27d7ea, infoPort=33243, infoSecurePort=0, ipcPort=33645, storageInfo=lv=-57;cid=testClusterID;nsid=1750279018;c=1731883747274) 2024-11-17T22:49:07,634 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x163db07ff5c0cd17 with lease ID 0xafc0e8df8471989b: from storage DS-eef4ffc4-f5e0-4c6a-b249-ae2592e80c1e node DatanodeRegistration(127.0.0.1:46877, datanodeUuid=c9a30852-542f-466a-859b-89763d27d7ea, infoPort=33243, infoSecurePort=0, ipcPort=33645, storageInfo=lv=-57;cid=testClusterID;nsid=1750279018;c=1731883747274), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:49:07,634 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x163db07ff5c0cd17 with lease ID 0xafc0e8df8471989b: Processing first storage report for DS-6914b3de-7dff-4e69-ba9e-787c9dd652a8 from datanode DatanodeRegistration(127.0.0.1:46877, datanodeUuid=c9a30852-542f-466a-859b-89763d27d7ea, infoPort=33243, infoSecurePort=0, ipcPort=33645, storageInfo=lv=-57;cid=testClusterID;nsid=1750279018;c=1731883747274) 2024-11-17T22:49:07,634 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x163db07ff5c0cd17 with lease ID 0xafc0e8df8471989b: from storage DS-6914b3de-7dff-4e69-ba9e-787c9dd652a8 node DatanodeRegistration(127.0.0.1:46877, datanodeUuid=c9a30852-542f-466a-859b-89763d27d7ea, infoPort=33243, infoSecurePort=0, ipcPort=33645, storageInfo=lv=-57;cid=testClusterID;nsid=1750279018;c=1731883747274), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:49:07,686 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@188d3e33{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/java.io.tmpdir/jetty-localhost-35533-hadoop-hdfs-3_4_1-tests_jar-_-any-13751514389702880704/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:49:07,686 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6da95783{HTTP/1.1, (http/1.1)}{localhost:35533} 2024-11-17T22:49:07,686 INFO [Time-limited test {}] server.Server(415): Started @188711ms 2024-11-17T22:49:07,687 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
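The Close-WAL-Writer stack trace above (RecoverLeaseFSUtils.isFileClosed -> Method.invoke -> DFSClient.isFileClosed, failing with "Filesystem closed") shows the lease-recovery helper probing DistributedFileSystem#isFileClosed reflectively. Below is a minimal, hedged sketch of that kind of reflective probe; the class and method names here (IsFileClosedProbe, isFileClosed helper) are hypothetical and this is not the code HBase actually runs.

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Hypothetical sketch of a reflective isFileClosed probe, similar in spirit to what the
    // RecoverLeaseFSUtils stack trace above suggests. Returns false whenever the probe cannot run.
    final class IsFileClosedProbe {
      static boolean isFileClosed(FileSystem fs, Path wal) {
        try {
          // DistributedFileSystem#isFileClosed(Path) only exists on HDFS implementations,
          // so look it up reflectively instead of linking against hadoop-hdfs-client directly.
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, wal);
        } catch (NoSuchMethodException e) {
          return false; // not an HDFS filesystem; caller falls back to recoverLease retries
        } catch (InvocationTargetException e) {
          // e.getCause() carries the real failure, e.g. IOException("Filesystem closed") as in the log
          return false;
        } catch (IllegalAccessException e) {
          return false;
        }
      }
    }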
2024-11-17T22:49:07,741 WARN [Thread-1665 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/cluster_8302175d-06ac-9ab8-268d-7043ad67e963/data/data3/current/BP-1985597313-172.17.0.2-1731883747274/current, will proceed with Du for space computation calculation, 2024-11-17T22:49:07,741 WARN [Thread-1666 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/cluster_8302175d-06ac-9ab8-268d-7043ad67e963/data/data4/current/BP-1985597313-172.17.0.2-1731883747274/current, will proceed with Du for space computation calculation, 2024-11-17T22:49:07,764 WARN [Thread-1654 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-17T22:49:07,766 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9ab647a729414aa7 with lease ID 0xafc0e8df8471989c: Processing first storage report for DS-c9321832-f784-4fa5-886c-0ff3f6c31408 from datanode DatanodeRegistration(127.0.0.1:37837, datanodeUuid=6c064ac7-22ca-4ac2-87f4-8703415cf23a, infoPort=37239, infoSecurePort=0, ipcPort=35315, storageInfo=lv=-57;cid=testClusterID;nsid=1750279018;c=1731883747274) 2024-11-17T22:49:07,766 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9ab647a729414aa7 with lease ID 0xafc0e8df8471989c: from storage DS-c9321832-f784-4fa5-886c-0ff3f6c31408 node DatanodeRegistration(127.0.0.1:37837, datanodeUuid=6c064ac7-22ca-4ac2-87f4-8703415cf23a, infoPort=37239, infoSecurePort=0, ipcPort=35315, storageInfo=lv=-57;cid=testClusterID;nsid=1750279018;c=1731883747274), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-17T22:49:07,766 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9ab647a729414aa7 with lease ID 0xafc0e8df8471989c: Processing first storage report for DS-7e93232d-8d12-41dd-bf96-71f685ab608a from datanode DatanodeRegistration(127.0.0.1:37837, datanodeUuid=6c064ac7-22ca-4ac2-87f4-8703415cf23a, infoPort=37239, infoSecurePort=0, ipcPort=35315, storageInfo=lv=-57;cid=testClusterID;nsid=1750279018;c=1731883747274) 2024-11-17T22:49:07,766 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9ab647a729414aa7 with lease ID 0xafc0e8df8471989c: from storage DS-7e93232d-8d12-41dd-bf96-71f685ab608a node DatanodeRegistration(127.0.0.1:37837, datanodeUuid=6c064ac7-22ca-4ac2-87f4-8703415cf23a, infoPort=37239, infoSecurePort=0, ipcPort=35315, storageInfo=lv=-57;cid=testClusterID;nsid=1750279018;c=1731883747274), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:49:07,810 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2 2024-11-17T22:49:07,815 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/cluster_8302175d-06ac-9ab8-268d-7043ad67e963/zookeeper_0, clientPort=59563, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/cluster_8302175d-06ac-9ab8-268d-7043ad67e963/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/cluster_8302175d-06ac-9ab8-268d-7043ad67e963/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-17T22:49:07,816 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59563 2024-11-17T22:49:07,816 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:49:07,819 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:49:07,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741825_1001 (size=7) 2024-11-17T22:49:07,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37837 is added to blk_1073741825_1001 (size=7) 2024-11-17T22:49:07,831 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2 with version=8 2024-11-17T22:49:07,831 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/hbase-staging 2024-11-17T22:49:07,834 INFO [Time-limited test {}] client.ConnectionUtils(128): master/1a6e40b21a48:0 server-side Connection retries=45 2024-11-17T22:49:07,834 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T22:49:07,834 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T22:49:07,834 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T22:49:07,834 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T22:49:07,834 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T22:49:07,834 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-17T22:49:07,834 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T22:49:07,835 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45037 2024-11-17T22:49:07,837 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45037 connecting to ZooKeeper ensemble=127.0.0.1:59563 2024-11-17T22:49:07,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:450370x0, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T22:49:07,841 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45037-0x1004fe026540000 connected 2024-11-17T22:49:07,853 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:49:07,854 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:49:07,856 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45037-0x1004fe026540000, quorum=127.0.0.1:59563, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T22:49:07,856 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2, hbase.cluster.distributed=false 2024-11-17T22:49:07,857 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45037-0x1004fe026540000, quorum=127.0.0.1:59563, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T22:49:07,860 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45037 2024-11-17T22:49:07,861 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45037 2024-11-17T22:49:07,862 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45037 2024-11-17T22:49:07,862 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45037 2024-11-17T22:49:07,863 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45037 2024-11-17T22:49:07,876 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1a6e40b21a48:0 server-side Connection retries=45 2024-11-17T22:49:07,876 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T22:49:07,876 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T22:49:07,876 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T22:49:07,876 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T22:49:07,876 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T22:49:07,877 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-17T22:49:07,877 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T22:49:07,877 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46745 2024-11-17T22:49:07,879 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46745 connecting to ZooKeeper ensemble=127.0.0.1:59563 2024-11-17T22:49:07,879 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:49:07,880 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:49:07,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:467450x0, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T22:49:07,884 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46745-0x1004fe026540001, quorum=127.0.0.1:59563, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T22:49:07,884 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46745-0x1004fe026540001 connected 2024-11-17T22:49:07,884 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-17T22:49:07,884 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-17T22:49:07,885 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46745-0x1004fe026540001, quorum=127.0.0.1:59563, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-17T22:49:07,885 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46745-0x1004fe026540001, quorum=127.0.0.1:59563, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T22:49:07,886 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46745 2024-11-17T22:49:07,886 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46745 2024-11-17T22:49:07,886 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46745 2024-11-17T22:49:07,887 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46745 2024-11-17T22:49:07,887 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46745 
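The RpcExecutor lines above describe bounded FIFO call queues (queueClass=LinkedBlockingQueue, maxQueueLength=30) drained by a fixed number of handler threads (handlerCount=3, a per-executor threadPrefix). The following is a generic, hedged sketch of that producer/handler pattern in plain java.util.concurrent; class and method names are illustrative, and this is not HBase's RpcExecutor itself.

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    // Generic sketch of the bounded FIFO call-queue pattern the RpcExecutor lines describe:
    // one LinkedBlockingQueue capped at maxQueueLength, drained by handlerCount worker threads.
    final class FifoCallQueueSketch {
      private final BlockingQueue<Runnable> callQueue;

      FifoCallQueueSketch(int maxQueueLength, int handlerCount, String threadPrefix) {
        this.callQueue = new LinkedBlockingQueue<>(maxQueueLength);
        for (int i = 0; i < handlerCount; i++) {
          Thread handler = new Thread(() -> {
            try {
              while (!Thread.currentThread().isInterrupted()) {
                callQueue.take().run(); // block until a call is queued, then execute it
              }
            } catch (InterruptedException e) {
              Thread.currentThread().interrupt();
            }
          }, threadPrefix + ".handler-" + i);
          handler.setDaemon(true);
          handler.start();
        }
      }

      // offer() rather than put(): a full queue rejects the call instead of blocking the caller,
      // mirroring a bounded queue of maxQueueLength=30.
      boolean dispatch(Runnable call) {
        return callQueue.offer(call);
      }
    }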
2024-11-17T22:49:07,899 DEBUG [M:0;1a6e40b21a48:45037 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;1a6e40b21a48:45037 2024-11-17T22:49:07,900 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/1a6e40b21a48,45037,1731883747833 2024-11-17T22:49:07,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45037-0x1004fe026540000, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T22:49:07,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46745-0x1004fe026540001, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T22:49:07,902 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45037-0x1004fe026540000, quorum=127.0.0.1:59563, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/1a6e40b21a48,45037,1731883747833 2024-11-17T22:49:07,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46745-0x1004fe026540001, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-17T22:49:07,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45037-0x1004fe026540000, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:49:07,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46745-0x1004fe026540001, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:49:07,903 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45037-0x1004fe026540000, quorum=127.0.0.1:59563, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-17T22:49:07,903 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/1a6e40b21a48,45037,1731883747833 from backup master directory 2024-11-17T22:49:07,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45037-0x1004fe026540000, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/1a6e40b21a48,45037,1731883747833 2024-11-17T22:49:07,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46745-0x1004fe026540001, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T22:49:07,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45037-0x1004fe026540000, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T22:49:07,904 WARN [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
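The master-election entries above follow the usual ZooKeeper ephemeral-node pattern: register under /hbase/backup-masters, watch /hbase/master (even before it exists), then delete the backup entry once the process becomes active. Here is a hedged sketch with the raw org.apache.zookeeper client; the znode paths, quorum address, and server name are copied from the log, everything else (including the assumption that the parent znodes already exist) is illustrative and not ActiveMasterManager's code.

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    // Illustrative only: register as a backup master, watch the active-master znode,
    // and clean up the backup entry if this process claims the active role.
    final class BackupMasterSketch {
      public static void main(String[] args) throws Exception {
        String serverName = "1a6e40b21a48,45037,1731883747833"; // server name from the log above
        String backupPath = "/hbase/backup-masters/" + serverName;
        ZooKeeper zk = new ZooKeeper("127.0.0.1:59563", 30000,
            (WatchedEvent event) -> System.out.println("ZK event: " + event));

        // Ephemeral: the znode disappears automatically if this master's session dies.
        // Assumes /hbase and /hbase/backup-masters were created earlier.
        zk.create(backupPath, serverName.getBytes(),
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

        // Watch the active-master znode even though it may not exist yet, matching
        // "Set watcher on znode that does not yet exist, /hbase/master".
        if (zk.exists("/hbase/master", true) == null) {
          try {
            // No active master: try to claim the role, then drop the backup entry,
            // as in "Deleting ZNode for ... from backup master directory".
            zk.create("/hbase/master", serverName.getBytes(),
                ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
            zk.delete(backupPath, -1);
          } catch (KeeperException.NodeExistsException e) {
            // Another master won the race; stay registered as a backup.
          }
        }
        zk.close();
      }
    }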
2024-11-17T22:49:07,904 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=1a6e40b21a48,45037,1731883747833 2024-11-17T22:49:07,908 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/hbase.id] with ID: cbdea8dc-a8f6-4c78-ba6c-3507a9e199fc 2024-11-17T22:49:07,908 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/.tmp/hbase.id 2024-11-17T22:49:07,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741826_1002 (size=42) 2024-11-17T22:49:07,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37837 is added to blk_1073741826_1002 (size=42) 2024-11-17T22:49:07,913 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/.tmp/hbase.id]:[hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/hbase.id] 2024-11-17T22:49:07,924 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:49:07,924 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-17T22:49:07,926 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
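The cluster-ID entries above show the common HDFS pattern of writing a small file to a .tmp location and then renaming it into place, so readers never observe a half-written hbase.id. Below is a hedged sketch of that pattern using the plain Hadoop FileSystem API; the NameNode address, paths, and ID string are taken from the log, while the helper class itself is illustrative rather than the actual FSUtils implementation (which writes the ID in its own on-disk format).

    import java.nio.charset.StandardCharsets;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustrative write-to-temp-then-rename, mirroring the FSUtils cluster ID messages above.
    final class ClusterIdFileSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(java.net.URI.create("hdfs://localhost:37191"), conf);

        Path rootDir = new Path("/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2");
        Path tmpId = new Path(rootDir, ".tmp/hbase.id");
        Path finalId = new Path(rootDir, "hbase.id");
        String clusterId = "cbdea8dc-a8f6-4c78-ba6c-3507a9e199fc"; // ID reported in the log

        // 1. Write the ID under .tmp first; a crash here leaves no partial hbase.id behind.
        try (FSDataOutputStream out = fs.create(tmpId, true)) {
          out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }
        // 2. Rename into place; an HDFS rename of a single file within one namespace is atomic.
        if (!fs.rename(tmpId, finalId)) {
          throw new java.io.IOException("Failed to move " + tmpId + " to " + finalId);
        }
        fs.close();
      }
    }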
2024-11-17T22:49:07,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46745-0x1004fe026540001, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:49:07,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45037-0x1004fe026540000, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:49:07,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37837 is added to blk_1073741827_1003 (size=196) 2024-11-17T22:49:07,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741827_1003 (size=196) 2024-11-17T22:49:07,934 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T22:49:07,935 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-17T22:49:07,935 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T22:49:07,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741828_1004 (size=1189) 2024-11-17T22:49:07,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37837 is added to blk_1073741828_1004 (size=1189) 2024-11-17T22:49:07,943 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/MasterData/data/master/store 2024-11-17T22:49:07,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37837 is added to blk_1073741829_1005 (size=34) 2024-11-17T22:49:07,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741829_1005 (size=34) 2024-11-17T22:49:07,949 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:49:07,949 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T22:49:07,949 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:49:07,949 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:49:07,949 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T22:49:07,949 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:49:07,949 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
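The descriptor dumped above for 'master:store' (families info, proc, rs, state with the listed versions, encodings, bloom filters, and block sizes) maps directly onto the HBase client builder API. The sketch below shows roughly how such a descriptor might be assembled, limited to the 'info' and 'proc' families for brevity; it is a hedged illustration, not the code MasterRegion actually executes.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch of the 'master:store' descriptor printed above, covering two of its four families.
    final class MasterStoreDescriptorSketch {
      static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                    // VERSIONS => '3'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
                .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
                .setInMemory(true)                                    // IN_MEMORY => 'true'
                .setBlocksize(8 * 1024)                               // BLOCKSIZE => 8 KB
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)                                    // VERSIONS => '1'
                .setBloomFilterType(BloomType.ROW)                    // BLOOMFILTER => 'ROW'
                .setBlocksize(64 * 1024)                              // BLOCKSIZE => 64 KB
                .build())
            .build();
      }
    }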
2024-11-17T22:49:07,949 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731883747949Disabling compacts and flushes for region at 1731883747949Disabling writes for close at 1731883747949Writing region close event to WAL at 1731883747949Closed at 1731883747949 2024-11-17T22:49:07,950 WARN [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/MasterData/data/master/store/.initializing 2024-11-17T22:49:07,950 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/MasterData/WALs/1a6e40b21a48,45037,1731883747833 2024-11-17T22:49:07,953 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a6e40b21a48%2C45037%2C1731883747833, suffix=, logDir=hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/MasterData/WALs/1a6e40b21a48,45037,1731883747833, archiveDir=hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/MasterData/oldWALs, maxLogs=10 2024-11-17T22:49:07,953 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C45037%2C1731883747833.1731883747953 2024-11-17T22:49:07,957 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/MasterData/WALs/1a6e40b21a48,45037,1731883747833/1a6e40b21a48%2C45037%2C1731883747833.1731883747953 2024-11-17T22:49:07,961 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33243:33243),(127.0.0.1/127.0.0.1:37239:37239)] 2024-11-17T22:49:07,962 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-17T22:49:07,962 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:49:07,962 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:49:07,962 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:49:07,963 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:49:07,965 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-17T22:49:07,965 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:49:07,965 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:49:07,965 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:49:07,966 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-17T22:49:07,966 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:49:07,967 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T22:49:07,967 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:49:07,967 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-17T22:49:07,967 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:49:07,968 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T22:49:07,968 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:49:07,969 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-17T22:49:07,969 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:49:07,969 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T22:49:07,969 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:49:07,970 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:49:07,970 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:49:07,971 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:49:07,971 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:49:07,972 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-17T22:49:07,973 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:49:07,975 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T22:49:07,975 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=760317, jitterRate=-0.03320741653442383}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-17T22:49:07,976 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731883747962Initializing all the Stores at 1731883747963 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883747963Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883747963Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883747963Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883747963Cleaning up temporary data from old regions at 1731883747971 (+8 ms)Region opened successfully at 1731883747976 (+5 ms) 2024-11-17T22:49:07,976 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-17T22:49:07,978 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62d1d20f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a6e40b21a48/172.17.0.2:0 2024-11-17T22:49:07,979 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-17T22:49:07,979 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-17T22:49:07,979 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-17T22:49:07,980 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-17T22:49:07,980 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-17T22:49:07,980 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-17T22:49:07,980 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-17T22:49:07,982 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-17T22:49:07,983 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45037-0x1004fe026540000, quorum=127.0.0.1:59563, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-17T22:49:07,984 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-17T22:49:07,984 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-17T22:49:07,984 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45037-0x1004fe026540000, quorum=127.0.0.1:59563, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-17T22:49:07,985 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-17T22:49:07,985 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-17T22:49:07,986 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45037-0x1004fe026540000, quorum=127.0.0.1:59563, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-17T22:49:07,986 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-17T22:49:07,987 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45037-0x1004fe026540000, quorum=127.0.0.1:59563, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-17T22:49:07,988 DEBUG 
[master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-17T22:49:07,989 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45037-0x1004fe026540000, quorum=127.0.0.1:59563, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-17T22:49:07,989 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-17T22:49:07,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45037-0x1004fe026540000, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T22:49:07,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46745-0x1004fe026540001, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T22:49:07,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45037-0x1004fe026540000, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:49:07,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46745-0x1004fe026540001, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:49:07,991 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=1a6e40b21a48,45037,1731883747833, sessionid=0x1004fe026540000, setting cluster-up flag (Was=false) 2024-11-17T22:49:07,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45037-0x1004fe026540000, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:49:07,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46745-0x1004fe026540001, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:49:07,995 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-17T22:49:07,995 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1a6e40b21a48,45037,1731883747833 2024-11-17T22:49:07,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46745-0x1004fe026540001, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:49:07,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45037-0x1004fe026540000, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:49:07,999 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-17T22:49:07,999 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1a6e40b21a48,45037,1731883747833 2024-11-17T22:49:08,000 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-17T22:49:08,002 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-17T22:49:08,002 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-17T22:49:08,002 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-17T22:49:08,002 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 1a6e40b21a48,45037,1731883747833 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-17T22:49:08,003 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/1a6e40b21a48:0, corePoolSize=5, maxPoolSize=5 2024-11-17T22:49:08,003 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/1a6e40b21a48:0, corePoolSize=5, maxPoolSize=5 2024-11-17T22:49:08,003 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=5, maxPoolSize=5 2024-11-17T22:49:08,003 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=5, maxPoolSize=5 2024-11-17T22:49:08,003 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/1a6e40b21a48:0, corePoolSize=10, maxPoolSize=10 2024-11-17T22:49:08,003 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:49:08,003 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=2, maxPoolSize=2 2024-11-17T22:49:08,003 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=1, 
maxPoolSize=1 2024-11-17T22:49:08,005 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T22:49:08,005 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-17T22:49:08,005 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731883778005 2024-11-17T22:49:08,005 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-17T22:49:08,005 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-17T22:49:08,005 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-17T22:49:08,005 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-17T22:49:08,005 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-17T22:49:08,006 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-17T22:49:08,006 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
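The ChoreService entries above schedule the cleaners as periodic background tasks (for example LogsCleaner with period=600000, unit=MILLISECONDS). Below is a hedged, generic sketch of that scheduling pattern using java.util.concurrent rather than HBase's ChoreService/ScheduledChore classes; the thread name and period are taken from the log, the cleaner body is a placeholder.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    // Generic stand-in for ChoreService: run a named cleanup task at a fixed period,
    // matching "name=LogsCleaner, period=600000, unit=MILLISECONDS" in the log above.
    final class CleanerChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService choreService = Executors.newSingleThreadScheduledExecutor(r -> {
          Thread t = new Thread(r, "LogsCleaner");
          t.setDaemon(true); // chores should not keep the JVM alive on shutdown
          return t;
        });

        Runnable logsCleaner = () -> {
          // Placeholder body: a real cleaner would list old WAL files and delete expired ones.
          System.out.println("LogsCleaner pass at " + System.currentTimeMillis());
        };

        // Initial delay 0, then every 600000 ms, as the ScheduledChore line reports.
        choreService.scheduleAtFixedRate(logsCleaner, 0, 600_000, TimeUnit.MILLISECONDS);

        Thread.sleep(1_000);      // let the first pass run in this demo
        choreService.shutdown();  // a long-lived service would keep running until cluster shutdown
      }
    }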
2024-11-17T22:49:08,006 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:49:08,006 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-17T22:49:08,009 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-17T22:49:08,009 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-17T22:49:08,009 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-17T22:49:08,009 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-17T22:49:08,010 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-17T22:49:08,010 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.large.0-1731883748010,5,FailOnTimeoutGroup] 2024-11-17T22:49:08,010 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.small.0-1731883748010,5,FailOnTimeoutGroup] 2024-11-17T22:49:08,010 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-17T22:49:08,010 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-11-17T22:49:08,010 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-11-17T22:49:08,010 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
2024-11-17T22:49:08,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37837 is added to blk_1073741831_1007 (size=1321)
2024-11-17T22:49:08,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741831_1007 (size=1321)
2024-11-17T22:49:08,014 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321
2024-11-17T22:49:08,014 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2
2024-11-17T22:49:08,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37837 is added to blk_1073741832_1008 (size=32)
2024-11-17T22:49:08,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741832_1008 (size=32)
2024-11-17T22:49:08,063 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-17T22:49:08,064 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-17T22:49:08,088 INFO [RS:0;1a6e40b21a48:46745 {}] regionserver.HRegionServer(746): ClusterId : cbdea8dc-a8f6-4c78-ba6c-3507a9e199fc
2024-11-17T22:49:08,088 DEBUG [RS:0;1a6e40b21a48:46745 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-11-17T22:49:08,090 DEBUG [RS:0;1a6e40b21a48:46745 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-11-17T22:49:08,090 DEBUG [RS:0;1a6e40b21a48:46745 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-11-17T22:49:08,092 DEBUG [RS:0;1a6e40b21a48:46745 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-11-17T22:49:08,092 DEBUG [RS:0;1a6e40b21a48:46745 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42f06443, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a6e40b21a48/172.17.0.2:0
2024-11-17T22:49:08,108 DEBUG [RS:0;1a6e40b21a48:46745 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;1a6e40b21a48:46745
2024-11-17T22:49:08,108 INFO [RS:0;1a6e40b21a48:46745 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-11-17T22:49:08,108 INFO [RS:0;1a6e40b21a48:46745 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-11-17T22:49:08,108 DEBUG [RS:0;1a6e40b21a48:46745 {}] regionserver.HRegionServer(832): About to register with Master.
2024-11-17T22:49:08,109 INFO [RS:0;1a6e40b21a48:46745 {}] regionserver.HRegionServer(2659): reportForDuty to master=1a6e40b21a48,45037,1731883747833 with port=46745, startcode=1731883747876 2024-11-17T22:49:08,109 DEBUG [RS:0;1a6e40b21a48:46745 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-17T22:49:08,111 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58905, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-17T22:49:08,112 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45037 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1a6e40b21a48,46745,1731883747876 2024-11-17T22:49:08,112 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45037 {}] master.ServerManager(517): Registering regionserver=1a6e40b21a48,46745,1731883747876 2024-11-17T22:49:08,114 DEBUG [RS:0;1a6e40b21a48:46745 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2 2024-11-17T22:49:08,114 DEBUG [RS:0;1a6e40b21a48:46745 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37191 2024-11-17T22:49:08,114 DEBUG [RS:0;1a6e40b21a48:46745 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-17T22:49:08,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45037-0x1004fe026540000, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T22:49:08,118 DEBUG [RS:0;1a6e40b21a48:46745 {}] zookeeper.ZKUtil(111): regionserver:46745-0x1004fe026540001, quorum=127.0.0.1:59563, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1a6e40b21a48,46745,1731883747876 2024-11-17T22:49:08,119 WARN [RS:0;1a6e40b21a48:46745 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-17T22:49:08,119 INFO [RS:0;1a6e40b21a48:46745 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T22:49:08,119 DEBUG [RS:0;1a6e40b21a48:46745 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/WALs/1a6e40b21a48,46745,1731883747876 2024-11-17T22:49:08,123 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1a6e40b21a48,46745,1731883747876] 2024-11-17T22:49:08,128 INFO [RS:0;1a6e40b21a48:46745 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-17T22:49:08,130 INFO [RS:0;1a6e40b21a48:46745 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-17T22:49:08,130 INFO [RS:0;1a6e40b21a48:46745 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-17T22:49:08,130 INFO [RS:0;1a6e40b21a48:46745 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-17T22:49:08,135 INFO [RS:0;1a6e40b21a48:46745 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-17T22:49:08,136 INFO [RS:0;1a6e40b21a48:46745 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-17T22:49:08,136 INFO [RS:0;1a6e40b21a48:46745 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-17T22:49:08,136 DEBUG [RS:0;1a6e40b21a48:46745 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:49:08,136 DEBUG [RS:0;1a6e40b21a48:46745 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:49:08,136 DEBUG [RS:0;1a6e40b21a48:46745 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:49:08,136 DEBUG [RS:0;1a6e40b21a48:46745 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:49:08,136 DEBUG [RS:0;1a6e40b21a48:46745 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:49:08,136 DEBUG [RS:0;1a6e40b21a48:46745 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1a6e40b21a48:0, corePoolSize=2, maxPoolSize=2 2024-11-17T22:49:08,136 DEBUG [RS:0;1a6e40b21a48:46745 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:49:08,136 DEBUG [RS:0;1a6e40b21a48:46745 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:49:08,136 DEBUG [RS:0;1a6e40b21a48:46745 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:49:08,136 DEBUG [RS:0;1a6e40b21a48:46745 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:49:08,136 DEBUG [RS:0;1a6e40b21a48:46745 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:49:08,136 DEBUG [RS:0;1a6e40b21a48:46745 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:49:08,137 DEBUG [RS:0;1a6e40b21a48:46745 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1a6e40b21a48:0, corePoolSize=3, maxPoolSize=3 2024-11-17T22:49:08,137 DEBUG [RS:0;1a6e40b21a48:46745 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0, corePoolSize=3, maxPoolSize=3 2024-11-17T22:49:08,145 INFO [RS:0;1a6e40b21a48:46745 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-17T22:49:08,145 INFO [RS:0;1a6e40b21a48:46745 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-17T22:49:08,145 INFO [RS:0;1a6e40b21a48:46745 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T22:49:08,146 INFO [RS:0;1a6e40b21a48:46745 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-17T22:49:08,146 INFO [RS:0;1a6e40b21a48:46745 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-17T22:49:08,146 INFO [RS:0;1a6e40b21a48:46745 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,46745,1731883747876-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T22:49:08,162 INFO [RS:0;1a6e40b21a48:46745 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-17T22:49:08,162 INFO [RS:0;1a6e40b21a48:46745 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,46745,1731883747876-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T22:49:08,162 INFO [RS:0;1a6e40b21a48:46745 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:49:08,162 INFO [RS:0;1a6e40b21a48:46745 {}] regionserver.Replication(171): 1a6e40b21a48,46745,1731883747876 started 2024-11-17T22:49:08,175 INFO [RS:0;1a6e40b21a48:46745 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:49:08,175 INFO [RS:0;1a6e40b21a48:46745 {}] regionserver.HRegionServer(1482): Serving as 1a6e40b21a48,46745,1731883747876, RpcServer on 1a6e40b21a48/172.17.0.2:46745, sessionid=0x1004fe026540001 2024-11-17T22:49:08,176 DEBUG [RS:0;1a6e40b21a48:46745 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-17T22:49:08,176 DEBUG [RS:0;1a6e40b21a48:46745 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1a6e40b21a48,46745,1731883747876 2024-11-17T22:49:08,176 DEBUG [RS:0;1a6e40b21a48:46745 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a6e40b21a48,46745,1731883747876' 2024-11-17T22:49:08,176 DEBUG [RS:0;1a6e40b21a48:46745 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-17T22:49:08,176 DEBUG [RS:0;1a6e40b21a48:46745 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-17T22:49:08,177 DEBUG [RS:0;1a6e40b21a48:46745 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-17T22:49:08,177 DEBUG [RS:0;1a6e40b21a48:46745 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-17T22:49:08,177 DEBUG [RS:0;1a6e40b21a48:46745 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1a6e40b21a48,46745,1731883747876 2024-11-17T22:49:08,177 DEBUG [RS:0;1a6e40b21a48:46745 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a6e40b21a48,46745,1731883747876' 2024-11-17T22:49:08,177 DEBUG [RS:0;1a6e40b21a48:46745 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-17T22:49:08,177 DEBUG 
[RS:0;1a6e40b21a48:46745 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-17T22:49:08,178 DEBUG [RS:0;1a6e40b21a48:46745 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-17T22:49:08,178 INFO [RS:0;1a6e40b21a48:46745 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-17T22:49:08,178 INFO [RS:0;1a6e40b21a48:46745 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-17T22:49:08,280 INFO [RS:0;1a6e40b21a48:46745 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a6e40b21a48%2C46745%2C1731883747876, suffix=, logDir=hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/WALs/1a6e40b21a48,46745,1731883747876, archiveDir=hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/oldWALs, maxLogs=32 2024-11-17T22:49:08,280 INFO [RS:0;1a6e40b21a48:46745 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C46745%2C1731883747876.1731883748280 2024-11-17T22:49:08,286 INFO [RS:0;1a6e40b21a48:46745 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/WALs/1a6e40b21a48,46745,1731883747876/1a6e40b21a48%2C46745%2C1731883747876.1731883748280 2024-11-17T22:49:08,287 DEBUG [RS:0;1a6e40b21a48:46745 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33243:33243),(127.0.0.1/127.0.0.1:37239:37239)] 2024-11-17T22:49:08,422 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:49:08,424 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T22:49:08,427 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T22:49:08,427 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:49:08,428 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:49:08,428 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T22:49:08,431 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T22:49:08,431 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:49:08,432 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:49:08,432 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T22:49:08,435 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T22:49:08,435 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:49:08,436 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:49:08,436 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T22:49:08,438 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak 
ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T22:49:08,438 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:49:08,438 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:49:08,438 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T22:49:08,439 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/hbase/meta/1588230740 2024-11-17T22:49:08,439 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/hbase/meta/1588230740 2024-11-17T22:49:08,440 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T22:49:08,440 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T22:49:08,440 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
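The FlushLargeStoresPolicy message above falls back to a derived per-family bound (16.0 M) because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the hbase:meta table descriptor. As a minimal, hypothetical sketch (the table name, column family, and value below are illustrative and not taken from this test), such a bound would normally be supplied as a table-descriptor value when building a table:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class PerFamilyFlushBoundSketch {
  public static void main(String[] args) {
    // Hypothetical table; 16 MB chosen only to mirror the 16.0 M fallback reported in the log.
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                  String.valueOf(16 * 1024 * 1024))
        .build();
    // Read the value back to confirm it is carried by the descriptor.
    System.out.println(td.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
  }
}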
2024-11-17T22:49:08,441 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T22:49:08,443 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T22:49:08,444 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=866104, jitterRate=0.10130928456783295}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T22:49:08,444 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731883748422Initializing all the Stores at 1731883748423 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883748423Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883748424 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883748424Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883748424Cleaning up temporary data from old regions at 1731883748440 (+16 ms)Region opened successfully at 1731883748444 (+4 ms) 2024-11-17T22:49:08,444 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T22:49:08,444 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T22:49:08,444 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T22:49:08,444 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T22:49:08,444 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T22:49:08,445 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T22:49:08,445 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731883748444Disabling compacts and flushes for region at 1731883748444Disabling writes for close at 1731883748444Writing 
region close event to WAL at 1731883748445 (+1 ms)Closed at 1731883748445
2024-11-17T22:49:08,446 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-17T22:49:08,446 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta
2024-11-17T22:49:08,446 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-11-17T22:49:08,448 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-17T22:49:08,449 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-11-17T22:49:08,492 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-17T22:49:08,599 DEBUG [1a6e40b21a48:45037 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-11-17T22:49:08,600 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1a6e40b21a48,46745,1731883747876
2024-11-17T22:49:08,601 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1a6e40b21a48,46745,1731883747876, state=OPENING
2024-11-17T22:49:08,603 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-11-17T22:49:08,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45037-0x1004fe026540000, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-17T22:49:08,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46745-0x1004fe026540001, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-17T22:49:08,605 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-17T22:49:08,605 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=1a6e40b21a48,46745,1731883747876}]
2024-11-17T22:49:08,605 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-17T22:49:08,605 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-17T22:49:08,761 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-11-17T22:49:08,764 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49695, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-11-17T22:49:08,770 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-11-17T22:49:08,770 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-17T22:49:08,773 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a6e40b21a48%2C46745%2C1731883747876.meta, suffix=.meta, logDir=hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/WALs/1a6e40b21a48,46745,1731883747876, archiveDir=hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/oldWALs, maxLogs=32
2024-11-17T22:49:08,774 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C46745%2C1731883747876.meta.1731883748773.meta
2024-11-17T22:49:08,780 INFO
[RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/WALs/1a6e40b21a48,46745,1731883747876/1a6e40b21a48%2C46745%2C1731883747876.meta.1731883748773.meta 2024-11-17T22:49:08,790 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37239:37239),(127.0.0.1/127.0.0.1:33243:33243)] 2024-11-17T22:49:08,797 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-17T22:49:08,797 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-17T22:49:08,797 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-17T22:49:08,797 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-17T22:49:08,797 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-17T22:49:08,798 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:49:08,798 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-17T22:49:08,798 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-17T22:49:08,799 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T22:49:08,800 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T22:49:08,800 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:49:08,801 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:49:08,801 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T22:49:08,802 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T22:49:08,802 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:49:08,802 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:49:08,802 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T22:49:08,803 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T22:49:08,803 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:49:08,803 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:49:08,804 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T22:49:08,804 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T22:49:08,804 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:49:08,805 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:49:08,805 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T22:49:08,805 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/hbase/meta/1588230740 2024-11-17T22:49:08,806 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/hbase/meta/1588230740 2024-11-17T22:49:08,807 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T22:49:08,807 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T22:49:08,808 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-17T22:49:08,809 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T22:49:08,810 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=728774, jitterRate=-0.07331649959087372}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T22:49:08,810 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-17T22:49:08,810 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731883748798Writing region info on filesystem at 1731883748798Initializing all the Stores at 1731883748799 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883748799Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883748799Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883748799Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883748799Cleaning up temporary data from old regions at 1731883748807 (+8 ms)Running coprocessor post-open hooks at 1731883748810 (+3 ms)Region opened successfully at 1731883748810 2024-11-17T22:49:08,811 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731883748760 2024-11-17T22:49:08,813 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-17T22:49:08,813 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-17T22:49:08,814 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=1a6e40b21a48,46745,1731883747876 2024-11-17T22:49:08,815 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1a6e40b21a48,46745,1731883747876, state=OPEN 2024-11-17T22:49:08,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45037-0x1004fe026540000, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T22:49:08,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46745-0x1004fe026540001, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T22:49:08,817 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=1a6e40b21a48,46745,1731883747876 2024-11-17T22:49:08,817 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T22:49:08,817 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T22:49:08,821 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-17T22:49:08,821 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=1a6e40b21a48,46745,1731883747876 in 212 msec 2024-11-17T22:49:08,824 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-17T22:49:08,824 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 375 msec 2024-11-17T22:49:08,825 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T22:49:08,825 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-17T22:49:08,827 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T22:49:08,827 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a6e40b21a48,46745,1731883747876, seqNum=-1] 2024-11-17T22:49:08,827 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T22:49:08,829 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47915, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T22:49:08,836 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 832 msec 2024-11-17T22:49:08,836 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731883748836, completionTime=-1 2024-11-17T22:49:08,836 INFO 
[master/1a6e40b21a48:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-17T22:49:08,836 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-17T22:49:08,838 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-17T22:49:08,838 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731883808838 2024-11-17T22:49:08,838 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731883868838 2024-11-17T22:49:08,838 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-17T22:49:08,838 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,45037,1731883747833-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T22:49:08,838 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,45037,1731883747833-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:49:08,838 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,45037,1731883747833-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:49:08,838 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-1a6e40b21a48:45037, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:49:08,838 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-17T22:49:08,838 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-17T22:49:08,840 DEBUG [master/1a6e40b21a48:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-17T22:49:08,842 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.938sec 2024-11-17T22:49:08,842 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-17T22:49:08,842 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-17T22:49:08,842 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-17T22:49:08,842 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-17T22:49:08,842 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-17T22:49:08,843 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,45037,1731883747833-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T22:49:08,843 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,45037,1731883747833-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-17T22:49:08,845 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-17T22:49:08,845 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-17T22:49:08,845 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,45037,1731883747833-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:49:08,889 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ec76923, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T22:49:08,889 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 1a6e40b21a48,45037,-1 for getting cluster id 2024-11-17T22:49:08,889 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-17T22:49:08,892 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'cbdea8dc-a8f6-4c78-ba6c-3507a9e199fc' 2024-11-17T22:49:08,892 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-17T22:49:08,893 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "cbdea8dc-a8f6-4c78-ba6c-3507a9e199fc" 2024-11-17T22:49:08,893 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e344f7e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T22:49:08,893 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a6e40b21a48,45037,-1] 2024-11-17T22:49:08,893 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-17T22:49:08,894 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:49:08,896 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44736, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-17T22:49:08,897 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a2777cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T22:49:08,898 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T22:49:08,899 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a6e40b21a48,46745,1731883747876, seqNum=-1] 2024-11-17T22:49:08,900 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T22:49:08,901 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38906, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T22:49:08,904 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=1a6e40b21a48,45037,1731883747833 2024-11-17T22:49:08,904 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:49:08,907 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-17T22:49:08,908 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-17T22:49:08,909 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 1a6e40b21a48,45037,1731883747833 2024-11-17T22:49:08,909 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3df9f0f3 2024-11-17T22:49:08,910 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-17T22:49:08,911 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44740, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-17T22:49:08,912 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45037 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-17T22:49:08,912 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45037 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
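The two TableDescriptorChecker warnings above indicate that "hbase.hregion.max.filesize" is set to 786432 and "hbase.hregion.memstore.flush.size" to 8192, far below the defaults, presumably so the test triggers splits and flushes quickly. A hedged sketch of how such values could be set on a test Configuration (hypothetical class name, not the actual test code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallRegionConfigSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Deliberately tiny values, matching the numbers the checker warns about above;
        // production deployments would keep the (much larger) defaults.
        conf.setLong("hbase.hregion.max.filesize", 786432L);       // ~768 KB
        conf.setLong("hbase.hregion.memstore.flush.size", 8192L);  // 8 KB
        System.out.println(conf.getLong("hbase.hregion.max.filesize", -1));
    }
}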
2024-11-17T22:49:08,912 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45037 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T22:49:08,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45037 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-17T22:49:08,915 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-17T22:49:08,915 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:49:08,915 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45037 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-17T22:49:08,916 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-17T22:49:08,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45037 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-17T22:49:08,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37837 is added to blk_1073741835_1011 (size=405) 2024-11-17T22:49:08,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741835_1011 (size=405) 2024-11-17T22:49:08,924 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 5d613cfd840cada8d2b45ea88710a813, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2 2024-11-17T22:49:08,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741836_1012 (size=88) 2024-11-17T22:49:08,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:37837 is added to blk_1073741836_1012 (size=88) 2024-11-17T22:49:08,931 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:49:08,931 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 5d613cfd840cada8d2b45ea88710a813, disabling compactions & flushes 2024-11-17T22:49:08,931 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813. 2024-11-17T22:49:08,931 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813. 2024-11-17T22:49:08,931 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813. after waiting 0 ms 2024-11-17T22:49:08,931 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813. 2024-11-17T22:49:08,932 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813. 2024-11-17T22:49:08,932 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 5d613cfd840cada8d2b45ea88710a813: Waiting for close lock at 1731883748931Disabling compacts and flushes for region at 1731883748931Disabling writes for close at 1731883748931Writing region close event to WAL at 1731883748931Closed at 1731883748931 2024-11-17T22:49:08,933 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-17T22:49:08,934 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731883748933"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731883748933"}]},"ts":"1731883748933"} 2024-11-17T22:49:08,936 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
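The create-table request above ('TestLogRolling-testCompactionRecordDoesntBlockRolling' with a single 'info' family, VERSIONS => '1') is driven through CreateTableProcedure on the master. A hedged client-side sketch of the kind of Admin call that produces such a request, assuming the HBase 2.x+ client API and not the actual test code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Single 'info' family keeping one version, as in the descriptor logged above.
            admin.createTable(TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
                    .setColumnFamily(ColumnFamilyDescriptorBuilder
                            .newBuilder(Bytes.toBytes("info"))
                            .setMaxVersions(1)
                            .build())
                    .build());
        }
    }
}

The pid=4, pid=5, and pid=6 procedure lines that follow then show the master assigning the new region and the region server opening it.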
2024-11-17T22:49:08,937 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-17T22:49:08,938 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731883748937"}]},"ts":"1731883748937"} 2024-11-17T22:49:08,940 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-17T22:49:08,940 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=5d613cfd840cada8d2b45ea88710a813, ASSIGN}] 2024-11-17T22:49:08,942 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=5d613cfd840cada8d2b45ea88710a813, ASSIGN 2024-11-17T22:49:08,943 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=5d613cfd840cada8d2b45ea88710a813, ASSIGN; state=OFFLINE, location=1a6e40b21a48,46745,1731883747876; forceNewPlan=false, retain=false 2024-11-17T22:49:09,064 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:09,065 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:49:09,094 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5d613cfd840cada8d2b45ea88710a813, regionState=OPENING, regionLocation=1a6e40b21a48,46745,1731883747876 2024-11-17T22:49:09,101 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=5d613cfd840cada8d2b45ea88710a813, ASSIGN because future has completed 2024-11-17T22:49:09,102 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5d613cfd840cada8d2b45ea88710a813, server=1a6e40b21a48,46745,1731883747876}] 2024-11-17T22:49:09,266 INFO [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813. 2024-11-17T22:49:09,267 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 5d613cfd840cada8d2b45ea88710a813, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813.', STARTKEY => '', ENDKEY => ''} 2024-11-17T22:49:09,267 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 5d613cfd840cada8d2b45ea88710a813 2024-11-17T22:49:09,267 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:49:09,268 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 5d613cfd840cada8d2b45ea88710a813 2024-11-17T22:49:09,268 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 5d613cfd840cada8d2b45ea88710a813 2024-11-17T22:49:09,270 INFO [StoreOpener-5d613cfd840cada8d2b45ea88710a813-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 5d613cfd840cada8d2b45ea88710a813 2024-11-17T22:49:09,273 INFO [StoreOpener-5d613cfd840cada8d2b45ea88710a813-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5d613cfd840cada8d2b45ea88710a813 columnFamilyName info 2024-11-17T22:49:09,273 DEBUG [StoreOpener-5d613cfd840cada8d2b45ea88710a813-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:49:09,274 INFO [StoreOpener-5d613cfd840cada8d2b45ea88710a813-1 {}] regionserver.HStore(327): Store=5d613cfd840cada8d2b45ea88710a813/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T22:49:09,274 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 5d613cfd840cada8d2b45ea88710a813 2024-11-17T22:49:09,275 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813 2024-11-17T22:49:09,275 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813 2024-11-17T22:49:09,275 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 5d613cfd840cada8d2b45ea88710a813 2024-11-17T22:49:09,275 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 5d613cfd840cada8d2b45ea88710a813 2024-11-17T22:49:09,277 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 5d613cfd840cada8d2b45ea88710a813 2024-11-17T22:49:09,279 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T22:49:09,279 INFO [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 5d613cfd840cada8d2b45ea88710a813; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=810138, jitterRate=0.03014412522315979}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-17T22:49:09,279 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5d613cfd840cada8d2b45ea88710a813 2024-11-17T22:49:09,280 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 5d613cfd840cada8d2b45ea88710a813: Running coprocessor pre-open hook at 1731883749268Writing region info on filesystem at 1731883749268Initializing all the Stores at 
1731883749270 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883749270Cleaning up temporary data from old regions at 1731883749275 (+5 ms)Running coprocessor post-open hooks at 1731883749279 (+4 ms)Region opened successfully at 1731883749280 (+1 ms) 2024-11-17T22:49:09,281 INFO [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813., pid=6, masterSystemTime=1731883749257 2024-11-17T22:49:09,283 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813. 2024-11-17T22:49:09,283 INFO [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813. 2024-11-17T22:49:09,285 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5d613cfd840cada8d2b45ea88710a813, regionState=OPEN, openSeqNum=2, regionLocation=1a6e40b21a48,46745,1731883747876 2024-11-17T22:49:09,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5d613cfd840cada8d2b45ea88710a813, server=1a6e40b21a48,46745,1731883747876 because future has completed 2024-11-17T22:49:09,291 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-17T22:49:09,291 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 5d613cfd840cada8d2b45ea88710a813, server=1a6e40b21a48,46745,1731883747876 in 186 msec 2024-11-17T22:49:09,294 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-17T22:49:09,294 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=5d613cfd840cada8d2b45ea88710a813, ASSIGN in 351 msec 2024-11-17T22:49:09,296 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-17T22:49:09,296 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731883749296"}]},"ts":"1731883749296"} 2024-11-17T22:49:09,298 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-17T22:49:09,300 INFO [PEWorker-2 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-17T22:49:09,302 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 388 msec 2024-11-17T22:49:09,493 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:10,066 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:10,066 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:10,408 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-17T22:49:10,409 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:10,410 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:10,410 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:10,411 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:10,411 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:10,412 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:10,434 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:10,435 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:10,435 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:10,435 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:10,435 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:10,435 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:10,438 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:10,439 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:10,439 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:10,441 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:49:10,494 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:11,067 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:11,067 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:11,495 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:12,069 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:12,069 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:12,495 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:13,070 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:13,070 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:13,496 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:14,071 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:14,071 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:14,129 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-17T22:49:14,130 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-17T22:49:14,496 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-17T22:49:14,902 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-17T22:49:14,902 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-17T22:49:14,903 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-17T22:49:14,903 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-17T22:49:14,904 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-11-17T22:49:14,904 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-11-17T22:49:14,905 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-17T22:49:14,905 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer
2024-11-17T22:49:15,073 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:15,073 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:15,497 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:16,074 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:16,074 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:16,499 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:17,076 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:17,076 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:17,500 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:18,078 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:18,078 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:18,501 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-17T22:49:19,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45037 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-17T22:49:19,013 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-17T22:49:19,013 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100
2024-11-17T22:49:19,021 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-17T22:49:19,021 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813.
2024-11-17T22:49:19,025 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813., hostname=1a6e40b21a48,46745,1731883747876, seqNum=2]
2024-11-17T22:49:19,034 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45037 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-17T22:49:19,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45037 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-17T22:49:19,042 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-17T22:49:19,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45037 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-17T22:49:19,044 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-17T22:49:19,045 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-17T22:49:19,079 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:19,079 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-17T22:49:19,211 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46745 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-11-17T22:49:19,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813.
2024-11-17T22:49:19,213 INFO [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 5d613cfd840cada8d2b45ea88710a813 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-17T22:49:19,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/.tmp/info/d07582114160474b86535aab440bedbe is 1080, key is row0001/info:/1731883759027/Put/seqid=0
2024-11-17T22:49:19,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741837_1013 (size=6033)
2024-11-17T22:49:19,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37837 is added to blk_1073741837_1013 (size=6033)
2024-11-17T22:49:19,237 INFO [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/.tmp/info/d07582114160474b86535aab440bedbe
2024-11-17T22:49:19,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/.tmp/info/d07582114160474b86535aab440bedbe as hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/info/d07582114160474b86535aab440bedbe
2024-11-17T22:49:19,250 INFO [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/info/d07582114160474b86535aab440bedbe, entries=1, sequenceid=5, filesize=5.9 K
2024-11-17T22:49:19,251 INFO [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 5d613cfd840cada8d2b45ea88710a813 in 39ms, sequenceid=5, compaction requested=false
2024-11-17T22:49:19,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 5d613cfd840cada8d2b45ea88710a813:
2024-11-17T22:49:19,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813.
2024-11-17T22:49:19,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8
2024-11-17T22:49:19,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45037 {}] master.HMaster(4169): Remote procedure done, pid=8
2024-11-17T22:49:19,257 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7
2024-11-17T22:49:19,258 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 210 msec
2024-11-17T22:49:19,260 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 222 msec
2024-11-17T22:49:19,502 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:20,080 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:20,080 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:20,503 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:21,081 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:21,081 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:21,505 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:22,083 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:22,083 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:22,505 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:22,506 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 after 68074ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor205.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:49:23,084 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:23,084 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:23,507 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:24,086 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:24,086 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:24,509 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:25,086 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:25,086 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:25,509 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:26,087 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:26,087 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:26,510 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:27,088 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:27,088 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:27,510 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:28,090 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:28,090 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:28,511 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:29,091 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:29,091 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-17T22:49:29,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45037 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-17T22:49:29,111 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-17T22:49:29,114 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45037 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-17T22:49:29,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45037 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-17T22:49:29,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45037 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-17T22:49:29,116 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-17T22:49:29,117 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-17T22:49:29,117 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-17T22:49:29,271 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46745 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-11-17T22:49:29,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813.
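The entries above show a client-requested flush arriving at the master and being turned into a FlushTableProcedure (pid=9) with a FlushRegionProcedure subprocedure (pid=10). For reference, a minimal client-side sketch of driving the same operation through the public Admin API follows; only the table name is taken from the log, the configuration source is an assumption, and the test itself goes through the asynchronous RawAsyncHBaseAdmin rather than this synchronous path.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    // Assumes an hbase-site.xml on the classpath that points at the running cluster.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
      // Admin.flush() asks the master to flush the table; in the log above this
      // shows up as FlushTableProcedure pid=9 with one FlushRegionProcedure (pid=10) per region.
      admin.flush(table);
    }
  }
}
```

With the asynchronous client the equivalent call is AsyncAdmin.flush(TableName), whose CompletableFuture completes when the procedure finishes, which is what the "Operation: FLUSH ... completed" entry above reflects.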
2024-11-17T22:49:29,272 INFO [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 5d613cfd840cada8d2b45ea88710a813 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-17T22:49:29,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/.tmp/info/39e0e7b669414a31af37e4861d9b75a1 is 1080, key is row0002/info:/1731883769112/Put/seqid=0
2024-11-17T22:49:29,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741838_1014 (size=6033)
2024-11-17T22:49:29,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37837 is added to blk_1073741838_1014 (size=6033)
2024-11-17T22:49:29,289 INFO [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/.tmp/info/39e0e7b669414a31af37e4861d9b75a1
2024-11-17T22:49:29,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/.tmp/info/39e0e7b669414a31af37e4861d9b75a1 as hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/info/39e0e7b669414a31af37e4861d9b75a1
2024-11-17T22:49:29,300 INFO [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/info/39e0e7b669414a31af37e4861d9b75a1, entries=1, sequenceid=9, filesize=5.9 K
2024-11-17T22:49:29,301 INFO [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 5d613cfd840cada8d2b45ea88710a813 in 29ms, sequenceid=9, compaction requested=false
2024-11-17T22:49:29,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 5d613cfd840cada8d2b45ea88710a813:
2024-11-17T22:49:29,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813.
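The flush above wrote a single ~1.05 KB cell for row0002 in the info family into a new store file (39e0e7b669414a31af37e4861d9b75a1, ~5.9 K, sequenceid=9). A small sketch of reading that row back through the normal client read path is below; the table name, row key and family come from the log, while the empty column qualifier is only inferred from the logged key row0002/info:/... and should be treated as an assumption.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadAfterFlushExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName name = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(name)) {
      // Fetch the row that was just flushed; after the flush it is served from
      // the committed store file rather than the memstore.
      Get get = new Get(Bytes.toBytes("row0002"));
      get.addFamily(Bytes.toBytes("info"));
      Result result = table.get(get);
      System.out.println("cells returned for row0002: " + result.size());
    }
  }
}
```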
2024-11-17T22:49:29,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10
2024-11-17T22:49:29,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45037 {}] master.HMaster(4169): Remote procedure done, pid=10
2024-11-17T22:49:29,305 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9
2024-11-17T22:49:29,305 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 186 msec
2024-11-17T22:49:29,307 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 192 msec
2024-11-17T22:49:29,512 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ...
11 more 2024-11-17T22:49:30,093 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:30,093 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:30,094 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 after 68084ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor205.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
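The repeated "Failed invocation ... Filesystem closed" warnings and the attempt=2 entries above come from RecoverLeaseFSUtils retrying WAL lease recovery after the test's HDFS client has already been shut down, so every reflective call into DistributedFileSystem fails inside DFSClient.checkOpen. A minimal sketch of the same recover-then-poll pattern against the public DistributedFileSystem API is below, assuming an open client; the NameNode address and WAL path are copied from the log, and the 1-second poll interval is an illustrative assumption, not the exact cadence RecoverLeaseFSUtils uses.

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class RecoverWalLeaseExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // NameNode and WAL path as they appear in the log. The filesystem must still be
    // open; calling these methods on a closed client throws
    // java.io.IOException: Filesystem closed, exactly as in the warnings above.
    URI nameNode = URI.create("hdfs://localhost:39901");
    Path wal = new Path("/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/"
        + "1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392");
    try (DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(nameNode, conf)) {
      // Ask the NameNode to start lease recovery; true means the file is already closed.
      boolean recovered = dfs.recoverLease(wal);
      // Poll isFileClosed() until recovery completes, roughly the loop that produces
      // the "attempt=N ... after NNNNNms" lines above.
      while (!recovered) {
        Thread.sleep(1000L);
        recovered = dfs.isFileClosed(wal);
      }
      System.out.println("lease recovered for " + wal);
    }
  }
}
```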
2024-11-17T22:49:30,094 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta after 68074ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor205.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T22:49:30,514 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:31,095 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:49:31,096 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:31,515 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:32,097 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:49:32,097 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:32,516 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:33,098 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:49:33,098 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:33,518 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-17T22:49:34,100 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-17T22:49:34,100 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null
2024-11-17T22:49:34,519 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null
2024-11-17T22:49:35,101 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null
2024-11-17T22:49:35,101 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null
2024-11-17T22:49:35,520 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null
2024-11-17T22:49:36,102 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null
2024-11-17T22:49:36,102 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null
2024-11-17T22:49:36,522 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null
2024-11-17T22:49:37,104 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null
2024-11-17T22:49:37,104 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null
2024-11-17T22:49:37,523 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null
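The RecoverLeaseFSUtils(258) warnings above all bottom out in Method.invoke: the frames show HBase probing DistributedFileSystem.isFileClosed reflectively while closing WAL writers, and once the mini-cluster's DFSClient has been shut down the probe surfaces as an InvocationTargetException wrapping IOException: Filesystem closed. The following is a minimal illustrative sketch of that reflective-probe pattern, not HBase's actual RecoverLeaseFSUtils code; the class and method names (IsFileClosedProbe, probeIsFileClosed) are invented for illustration.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class IsFileClosedProbe {
  private IsFileClosedProbe() {}

  /** Returns true if HDFS reports the file closed, false if the probe is unavailable or failed. */
  public static boolean probeIsFileClosed(FileSystem fs, Path path) {
    Method isFileClosed;
    try {
      // Only DistributedFileSystem exposes isFileClosed(Path); look it up reflectively
      // so the same code also runs against FileSystem implementations that lack it.
      isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      return false; // not an HDFS client, nothing to probe
    }
    try {
      return (Boolean) isFileClosed.invoke(fs, path);
    } catch (InvocationTargetException e) {
      // e.getCause() carries the real failure, e.g. IOException: Filesystem closed
      // after the cluster's DFSClient has been torn down, as in the traces above.
      return false;
    } catch (IllegalAccessException e) {
      return false;
    }
  }
}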
2024-11-17T22:49:37,810 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-17T22:49:38,106 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null
2024-11-17T22:49:38,106 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null
2024-11-17T22:49:38,525 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null
2024-11-17T22:49:39,107 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null
2024-11-17T22:49:39,107 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null
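The lone DEBUG from HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer above records a NoSuchFieldException for a field named threadGroup, which the message itself attributes to newer Hadoop releases having dropped that field (HBASE-27595). As a hedged, generic sketch of the reflective field lookup pattern that produces such a message (placeholder class and helper names, not the real FsDatasetAsyncDiskService internals):

import java.lang.reflect.Field;

final class PrivateFieldPeek {
  private PrivateFieldPeek() {}

  /** Walks the class hierarchy for a declared field; returns null if it no longer exists. */
  static Object readPrivateField(Object target, String fieldName) {
    for (Class<?> c = target.getClass(); c != null; c = c.getSuperclass()) {
      try {
        Field f = c.getDeclaredField(fieldName);
        f.setAccessible(true);
        return f.get(target);
      } catch (NoSuchFieldException e) {
        // Keep walking up the hierarchy; if the field was removed upstream,
        // callers typically log a DEBUG like the one above and carry on.
      } catch (IllegalAccessException e) {
        return null;
      }
    }
    return null;
  }
}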
2024-11-17T22:49:39,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45037 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-17T22:49:39,173 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-17T22:49:39,180 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C46745%2C1731883747876.1731883779179
2024-11-17T22:49:39,188 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T22:49:39,188 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T22:49:39,188 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T22:49:39,188 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T22:49:39,188 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T22:49:39,188 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/WALs/1a6e40b21a48,46745,1731883747876/1a6e40b21a48%2C46745%2C1731883747876.1731883748280 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/WALs/1a6e40b21a48,46745,1731883747876/1a6e40b21a48%2C46745%2C1731883747876.1731883779179
2024-11-17T22:49:39,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37837 is added to blk_1073741833_1009 (size=5546)
2024-11-17T22:49:39,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741833_1009 (size=5546)
2024-11-17T22:49:39,194 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37239:37239),(127.0.0.1/127.0.0.1:33243:33243)]
2024-11-17T22:49:39,195 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45037 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-17T22:49:39,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45037 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-17T22:49:39,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45037 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-17T22:49:39,198 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-17T22:49:39,199 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-17T22:49:39,199 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
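The records above show a client-requested flush (Operation: FLUSH, then the HMaster flush call) being turned into FlushTableProcedure pid=11 with a FlushRegionProcedure subprocedure. A minimal client-side sketch of issuing the same request through the synchronous Admin API follows; it is illustrative rather than the test's own code, and the connection configuration (hbase-site.xml on the classpath) is assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush the table; the master runs a flush-table procedure
      // that fans out per-region flush work, as seen with pid=11/pid=12 above.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}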
2024-11-17T22:49:39,353 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46745 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-11-17T22:49:39,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813.
2024-11-17T22:49:39,354 INFO [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 5d613cfd840cada8d2b45ea88710a813 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-17T22:49:39,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/.tmp/info/f42a4eb70066402dbd43b88b8f87b662 is 1080, key is row0003/info:/1731883779175/Put/seqid=0
2024-11-17T22:49:39,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741840_1016 (size=6033)
2024-11-17T22:49:39,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37837 is added to blk_1073741840_1016 (size=6033)
2024-11-17T22:49:39,369 INFO [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/.tmp/info/f42a4eb70066402dbd43b88b8f87b662
2024-11-17T22:49:39,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/.tmp/info/f42a4eb70066402dbd43b88b8f87b662 as hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/info/f42a4eb70066402dbd43b88b8f87b662
2024-11-17T22:49:39,383 INFO [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/info/f42a4eb70066402dbd43b88b8f87b662, entries=1, sequenceid=13, filesize=5.9 K
2024-11-17T22:49:39,384 INFO [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 5d613cfd840cada8d2b45ea88710a813 in 30ms, sequenceid=13, compaction requested=true
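The flush above persists a single cell whose key is row0003/info: into the store file f42a4eb70066402dbd43b88b8f87b662. For context, a cell like that would be produced by a plain Put against the test table; the sketch below is illustrative only, and the empty qualifier and the value bytes are assumptions, not taken from the test.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class PutRowExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(
             TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))) {
      // Write one cell to row "row0003" in column family "info" with an empty qualifier,
      // matching the key shape row0003/info: seen in the HFileWriterImpl record above.
      Put put = new Put(Bytes.toBytes("row0003"));
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), Bytes.toBytes("value"));
      table.put(put); // lands in the memstore and WAL, then reaches an HFile on flush
    }
  }
}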
2024-11-17T22:49:39,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 5d613cfd840cada8d2b45ea88710a813:
2024-11-17T22:49:39,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813.
2024-11-17T22:49:39,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-11-17T22:49:39,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45037 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-11-17T22:49:39,389 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-11-17T22:49:39,389 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 187 msec
2024-11-17T22:49:39,393 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 195 msec
2024-11-17T22:49:39,526 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null
2024-11-17T22:49:40,108 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null
2024-11-17T22:49:40,109 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null
2024-11-17T22:49:40,527 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null
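The Close-WAL-Writer warnings that continue above come from lease recovery retrying against an already-closed filesystem client. For reference, the non-reflective HDFS entry point that this kind of recovery ultimately drives is DistributedFileSystem.recoverLease(Path); the sketch below shows a generic poll-until-recovered loop with an assumed retry count and backoff, not HBase's RecoverLeaseFSUtils logic.

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class RecoverLeaseExample {
  private RecoverLeaseExample() {}

  /** Best-effort lease recovery for a WAL-like file; true once HDFS reports the file closed. */
  public static boolean recoverLease(FileSystem fs, Path walFile) throws Exception {
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // non-HDFS filesystems have no leases to recover
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    // Poll until recoverLease reports success, backing off between attempts
    // (retry count and sleep are illustrative choices).
    for (int attempt = 0; attempt < 10; attempt++) {
      if (dfs.recoverLease(walFile)) {
        return true;
      }
      Thread.sleep(1000L);
    }
    return false;
  }
}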
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:41,110 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:41,110 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:41,528 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-17T22:49:49,211 INFO [master/1a6e40b21a48:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-17T22:49:49,212 INFO [master/1a6e40b21a48:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-17T22:49:49,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45037 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-17T22:49:49,283 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-17T22:49:49,283 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T22:49:49,287 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T22:49:49,288 DEBUG [Time-limited test {}] regionserver.HStore(1541): 5d613cfd840cada8d2b45ea88710a813/info is initiating minor compaction (all files) 2024-11-17T22:49:49,288 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-17T22:49:49,288 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
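The repeated WARN entries above all come from one probe: while waiting for lease recovery on the three WAL files, RecoverLeaseFSUtils invokes DistributedFileSystem#isFileClosed through reflection (hence the InvocationTargetException and GeneratedMethodAccessor frames), and every attempt fails with java.io.IOException: Filesystem closed because the test's HDFS client has already been closed; the probe is retried roughly once per second per file. The sketch below is illustrative only, not the actual RecoverLeaseFSUtils code: the class name IsFileClosedProbe, its method, and the retry parameters are assumptions, but the reflective probe-and-retry pattern is the one that produces this log shape.

import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch of the reflective isFileClosed probe seen in the WARNs above.
// isFileClosed is looked up reflectively so the caller still works against FileSystem
// implementations that do not expose the method.
public final class IsFileClosedProbe {

  // Returns true once the NameNode reports the file as closed, false if the probe
  // is unavailable or never succeeds within the given number of attempts.
  static boolean waitUntilFileClosed(FileSystem fs, Path path, int attempts, long sleepMs)
      throws InterruptedException {
    Method isFileClosed;
    try {
      isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      return false; // FileSystem without isFileClosed: nothing to probe.
    }
    for (int i = 0; i < attempts; i++) {
      try {
        if ((Boolean) isFileClosed.invoke(fs, path)) {
          return true;
        }
      } catch (ReflectiveOperationException e) {
        // e.getCause() carries the real failure, e.g. IOException("Filesystem closed")
        // when the DFSClient was shut down before lease recovery finished.
      }
      Thread.sleep(sleepMs); // back off before the next probe, mirroring the ~1 s cadence above
    }
    return false;
  }
}

Because the underlying client is closed, the invocation can never succeed here; the cause wrapped inside the InvocationTargetException is the same IOException on every attempt.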
2024-11-17T22:49:49,289 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 5d613cfd840cada8d2b45ea88710a813/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813. 2024-11-17T22:49:49,289 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/info/d07582114160474b86535aab440bedbe, hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/info/39e0e7b669414a31af37e4861d9b75a1, hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/info/f42a4eb70066402dbd43b88b8f87b662] into tmpdir=hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/.tmp, totalSize=17.7 K 2024-11-17T22:49:49,290 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting d07582114160474b86535aab440bedbe, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731883759027 2024-11-17T22:49:49,291 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 39e0e7b669414a31af37e4861d9b75a1, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731883769112 2024-11-17T22:49:49,291 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting f42a4eb70066402dbd43b88b8f87b662, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731883779175 2024-11-17T22:49:49,308 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 5d613cfd840cada8d2b45ea88710a813#info#compaction#44 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T22:49:49,308 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/.tmp/info/fd9e1681a05d403eab253074a528c93d is 1080, key is row0001/info:/1731883759027/Put/seqid=0 2024-11-17T22:49:49,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37837 is added to blk_1073741841_1017 (size=8296) 2024-11-17T22:49:49,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741841_1017 (size=8296) 2024-11-17T22:49:49,320 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/.tmp/info/fd9e1681a05d403eab253074a528c93d as hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/info/fd9e1681a05d403eab253074a528c93d 2024-11-17T22:49:49,326 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 5d613cfd840cada8d2b45ea88710a813/info of 5d613cfd840cada8d2b45ea88710a813 into fd9e1681a05d403eab253074a528c93d(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T22:49:49,327 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 5d613cfd840cada8d2b45ea88710a813: 2024-11-17T22:49:49,329 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C46745%2C1731883747876.1731883789329 2024-11-17T22:49:49,335 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:49:49,335 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:49:49,335 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:49:49,335 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:49:49,335 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:49:49,335 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/WALs/1a6e40b21a48,46745,1731883747876/1a6e40b21a48%2C46745%2C1731883747876.1731883779179 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/WALs/1a6e40b21a48,46745,1731883747876/1a6e40b21a48%2C46745%2C1731883747876.1731883789329 2024-11-17T22:49:49,336 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37239:37239),(127.0.0.1/127.0.0.1:33243:33243)] 2024-11-17T22:49:49,336 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/WALs/1a6e40b21a48,46745,1731883747876/1a6e40b21a48%2C46745%2C1731883747876.1731883779179 is not closed yet, will try archiving it next time 2024-11-17T22:49:49,336 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/WALs/1a6e40b21a48,46745,1731883747876/1a6e40b21a48%2C46745%2C1731883747876.1731883748280 to hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/oldWALs/1a6e40b21a48%2C46745%2C1731883747876.1731883748280 2024-11-17T22:49:49,337 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45037 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-17T22:49:49,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741839_1015 (size=2520) 2024-11-17T22:49:49,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37837 is added to blk_1073741839_1015 (size=2520) 2024-11-17T22:49:49,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45037 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-17T22:49:49,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45037 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-17T22:49:49,339 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-17T22:49:49,340 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T22:49:49,340 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T22:49:49,494 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46745 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-17T22:49:49,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813. 
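
The flush at this point is requested by the client (Client=jenkins//172.17.0.2 flush ...) and lands on the master as a FlushTableProcedure (pid=13), which spawns a per-region FlushRegionProcedure (pid=14) that is executed on the region server. A minimal sketch of the blocking-Admin call that triggers this chain is shown below; it assumes a reachable cluster and standard client configuration, and only the table name is taken from the log (the test itself goes through the async admin).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Ask the master to flush the table; on the master side this surfaces as a
                // FlushTableProcedure with FlushRegionProcedure children, as in the log above.
                admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
            }
        }
    }
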
2024-11-17T22:49:49,495 INFO [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 5d613cfd840cada8d2b45ea88710a813 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-17T22:49:49,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/.tmp/info/ea9b755c1fa04b06958e25542e8b6ac1 is 1080, key is row0000/info:/1731883789328/Put/seqid=0 2024-11-17T22:49:49,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741843_1019 (size=6033) 2024-11-17T22:49:49,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37837 is added to blk_1073741843_1019 (size=6033) 2024-11-17T22:49:49,510 INFO [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/.tmp/info/ea9b755c1fa04b06958e25542e8b6ac1 2024-11-17T22:49:49,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/.tmp/info/ea9b755c1fa04b06958e25542e8b6ac1 as hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/info/ea9b755c1fa04b06958e25542e8b6ac1 2024-11-17T22:49:49,520 INFO [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/info/ea9b755c1fa04b06958e25542e8b6ac1, entries=1, sequenceid=18, filesize=5.9 K 2024-11-17T22:49:49,522 INFO [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 5d613cfd840cada8d2b45ea88710a813 in 27ms, sequenceid=18, compaction requested=false 2024-11-17T22:49:49,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 5d613cfd840cada8d2b45ea88710a813: 2024-11-17T22:49:49,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813. 
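
As the entries above show, the flushed HFile is first written under the region's .tmp directory (DefaultStoreFlusher) and only then committed into the live info/ store directory (HRegionFileSystem(442)) before HStore registers it. Below is a simplified sketch of that write-to-.tmp-then-rename commit pattern using the Hadoop FileSystem API; it is not the HRegionFileSystem code itself, and the paths and class name are hypothetical.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpThenCommit {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(conf);

            // Hypothetical paths; the log uses <region>/.tmp/info/<file> and <region>/info/<file>.
            Path tmpFile = new Path("/demo/region/.tmp/info/flushed-file");
            Path finalFile = new Path("/demo/region/info/flushed-file");

            // 1. Write the new file under .tmp so readers never observe a half-written file.
            try (FSDataOutputStream out = fs.create(tmpFile, true)) {
                out.writeBytes("cell data would go here");
            }

            // 2. "Commit" by renaming into the live store directory. HDFS rename is atomic,
            //    so the finished file becomes visible all at once, mirroring the commit step above.
            fs.mkdirs(finalFile.getParent());
            if (!fs.rename(tmpFile, finalFile)) {
                throw new IOException("commit failed: " + tmpFile + " -> " + finalFile);
            }
        }
    }
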
2024-11-17T22:49:49,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-17T22:49:49,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45037 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-17T22:49:49,525 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-17T22:49:49,525 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 183 msec 2024-11-17T22:49:49,527 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 189 msec 2024-11-17T22:49:49,538 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:49:49,740 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/WALs/1a6e40b21a48,46745,1731883747876/1a6e40b21a48%2C46745%2C1731883747876.1731883779179 to hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/oldWALs/1a6e40b21a48%2C46745%2C1731883747876.1731883779179 2024-11-17T22:49:50,122 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:50,122 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:50,539 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:51,123 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:51,123 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:51,540 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:52,125 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:52,125 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:52,541 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:53,126 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:53,126 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:53,542 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:54,128 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:54,128 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:54,268 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 5d613cfd840cada8d2b45ea88710a813, had cached 0 bytes from a total of 14329 2024-11-17T22:49:54,543 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:55,129 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:55,129 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:55,545 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:56,129 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:56,130 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:56,546 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:57,130 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:57,130 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:57,546 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:58,131 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:58,131 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:58,547 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:59,131 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:59,131 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:49:59,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45037 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-17T22:49:59,361 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-17T22:49:59,364 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C46745%2C1731883747876.1731883799364 2024-11-17T22:49:59,370 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:49:59,370 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:49:59,370 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:49:59,370 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:49:59,370 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:49:59,370 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/WALs/1a6e40b21a48,46745,1731883747876/1a6e40b21a48%2C46745%2C1731883747876.1731883789329 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/WALs/1a6e40b21a48,46745,1731883747876/1a6e40b21a48%2C46745%2C1731883747876.1731883799364 2024-11-17T22:49:59,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741842_1018 (size=2026) 2024-11-17T22:49:59,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37837 is added to blk_1073741842_1018 (size=2026) 2024-11-17T22:49:59,373 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37239:37239),(127.0.0.1/127.0.0.1:33243:33243)] 2024-11-17T22:49:59,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-17T22:49:59,373 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-17T22:49:59,373 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T22:49:59,373 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:49:59,374 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:49:59,374 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-17T22:49:59,374 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-17T22:49:59,374 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=824251857, stopped=false 2024-11-17T22:49:59,374 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=1a6e40b21a48,45037,1731883747833 2024-11-17T22:49:59,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46745-0x1004fe026540001, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T22:49:59,375 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T22:49:59,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46745-0x1004fe026540001, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:49:59,375 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46745-0x1004fe026540001, quorum=127.0.0.1:59563, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T22:49:59,376 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-17T22:49:59,376 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at 
org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T22:49:59,376 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:49:59,376 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '1a6e40b21a48,46745,1731883747876' ***** 2024-11-17T22:49:59,376 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-17T22:49:59,376 INFO [RS:0;1a6e40b21a48:46745 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-17T22:49:59,376 INFO [RS:0;1a6e40b21a48:46745 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-17T22:49:59,376 INFO [RS:0;1a6e40b21a48:46745 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-17T22:49:59,376 INFO [RS:0;1a6e40b21a48:46745 {}] regionserver.HRegionServer(3091): Received CLOSE for 5d613cfd840cada8d2b45ea88710a813 2024-11-17T22:49:59,377 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-17T22:49:59,377 INFO [RS:0;1a6e40b21a48:46745 {}] regionserver.HRegionServer(959): stopping server 1a6e40b21a48,46745,1731883747876 2024-11-17T22:49:59,377 INFO [RS:0;1a6e40b21a48:46745 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T22:49:59,377 INFO [RS:0;1a6e40b21a48:46745 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;1a6e40b21a48:46745. 
2024-11-17T22:49:59,377 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 5d613cfd840cada8d2b45ea88710a813, disabling compactions & flushes 2024-11-17T22:49:59,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45037-0x1004fe026540000, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T22:49:59,377 DEBUG [RS:0;1a6e40b21a48:46745 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T22:49:59,377 DEBUG [RS:0;1a6e40b21a48:46745 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:49:59,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45037-0x1004fe026540000, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:49:59,377 INFO [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813. 2024-11-17T22:49:59,377 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813. 2024-11-17T22:49:59,377 INFO [RS:0;1a6e40b21a48:46745 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-17T22:49:59,377 INFO [RS:0;1a6e40b21a48:46745 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-17T22:49:59,377 INFO [RS:0;1a6e40b21a48:46745 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-17T22:49:59,377 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813. 
after waiting 0 ms 2024-11-17T22:49:59,377 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813. 2024-11-17T22:49:59,377 INFO [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 5d613cfd840cada8d2b45ea88710a813 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-17T22:49:59,377 INFO [RS:0;1a6e40b21a48:46745 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-17T22:49:59,378 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45037-0x1004fe026540000, quorum=127.0.0.1:59563, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T22:49:59,381 INFO [RS:0;1a6e40b21a48:46745 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-17T22:49:59,381 DEBUG [RS:0;1a6e40b21a48:46745 {}] regionserver.HRegionServer(1325): Online Regions={5d613cfd840cada8d2b45ea88710a813=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813., 1588230740=hbase:meta,,1.1588230740} 2024-11-17T22:49:59,381 DEBUG [RS:0;1a6e40b21a48:46745 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 5d613cfd840cada8d2b45ea88710a813 2024-11-17T22:49:59,381 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T22:49:59,381 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T22:49:59,381 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T22:49:59,381 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T22:49:59,381 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T22:49:59,381 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-17T22:49:59,382 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/.tmp/info/3d3752ed67b6437f90c926300b6e3c4e is 1080, key is row0001/info:/1731883799362/Put/seqid=0 2024-11-17T22:49:59,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741845_1021 (size=6033) 2024-11-17T22:49:59,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37837 is added to blk_1073741845_1021 (size=6033) 2024-11-17T22:49:59,392 INFO [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 
(bloomFilter=true), to=hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/.tmp/info/3d3752ed67b6437f90c926300b6e3c4e 2024-11-17T22:49:59,404 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/hbase/meta/1588230740/.tmp/info/31f81bf59d634af3a79b4e5aa5b93295 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813./info:regioninfo/1731883749284/Put/seqid=0 2024-11-17T22:49:59,410 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/.tmp/info/3d3752ed67b6437f90c926300b6e3c4e as hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/info/3d3752ed67b6437f90c926300b6e3c4e 2024-11-17T22:49:59,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741846_1022 (size=7308) 2024-11-17T22:49:59,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37837 is added to blk_1073741846_1022 (size=7308) 2024-11-17T22:49:59,415 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/hbase/meta/1588230740/.tmp/info/31f81bf59d634af3a79b4e5aa5b93295 2024-11-17T22:49:59,417 INFO [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/info/3d3752ed67b6437f90c926300b6e3c4e, entries=1, sequenceid=22, filesize=5.9 K 2024-11-17T22:49:59,418 INFO [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 5d613cfd840cada8d2b45ea88710a813 in 41ms, sequenceid=22, compaction requested=true 2024-11-17T22:49:59,418 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/info/d07582114160474b86535aab440bedbe, hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/info/39e0e7b669414a31af37e4861d9b75a1, 
hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/info/f42a4eb70066402dbd43b88b8f87b662] to archive 2024-11-17T22:49:59,419 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-17T22:49:59,421 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/info/d07582114160474b86535aab440bedbe to hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/info/d07582114160474b86535aab440bedbe 2024-11-17T22:49:59,423 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/info/39e0e7b669414a31af37e4861d9b75a1 to hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/info/39e0e7b669414a31af37e4861d9b75a1 2024-11-17T22:49:59,424 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/info/f42a4eb70066402dbd43b88b8f87b662 to hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/info/f42a4eb70066402dbd43b88b8f87b662 2024-11-17T22:49:59,425 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=1a6e40b21a48:45037 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-17T22:49:59,425 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [d07582114160474b86535aab440bedbe=6033, 39e0e7b669414a31af37e4861d9b75a1=6033, f42a4eb70066402dbd43b88b8f87b662=6033] 2024-11-17T22:49:59,433 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5d613cfd840cada8d2b45ea88710a813/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-17T22:49:59,434 INFO [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813. 2024-11-17T22:49:59,434 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 5d613cfd840cada8d2b45ea88710a813: Waiting for close lock at 1731883799377Running coprocessor pre-close hooks at 1731883799377Disabling compacts and flushes for region at 1731883799377Disabling writes for close at 1731883799377Obtaining lock to block concurrent updates at 1731883799377Preparing flush snapshotting stores in 5d613cfd840cada8d2b45ea88710a813 at 1731883799377Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731883799378 (+1 ms)Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813. at 1731883799378Flushing 5d613cfd840cada8d2b45ea88710a813/info: creating writer at 1731883799378Flushing 5d613cfd840cada8d2b45ea88710a813/info: appending metadata at 1731883799382 (+4 ms)Flushing 5d613cfd840cada8d2b45ea88710a813/info: closing flushed file at 1731883799382Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@31eafc96: reopening flushed file at 1731883799403 (+21 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 5d613cfd840cada8d2b45ea88710a813 in 41ms, sequenceid=22, compaction requested=true at 1731883799418 (+15 ms)Writing region close event to WAL at 1731883799429 (+11 ms)Running coprocessor post-close hooks at 1731883799434 (+5 ms)Closed at 1731883799434 2024-11-17T22:49:59,435 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731883748912.5d613cfd840cada8d2b45ea88710a813. 
2024-11-17T22:49:59,443 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/hbase/meta/1588230740/.tmp/ns/3e6a96810db841babf80cd3e306bf5dc is 43, key is default/ns:d/1731883748830/Put/seqid=0 2024-11-17T22:49:59,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741847_1023 (size=5153) 2024-11-17T22:49:59,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37837 is added to blk_1073741847_1023 (size=5153) 2024-11-17T22:49:59,449 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/hbase/meta/1588230740/.tmp/ns/3e6a96810db841babf80cd3e306bf5dc 2024-11-17T22:49:59,475 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/hbase/meta/1588230740/.tmp/table/5dbaaa4a16f445aabd43eb0ff3a8a557 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731883749296/Put/seqid=0 2024-11-17T22:49:59,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741848_1024 (size=5508) 2024-11-17T22:49:59,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37837 is added to blk_1073741848_1024 (size=5508) 2024-11-17T22:49:59,480 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/hbase/meta/1588230740/.tmp/table/5dbaaa4a16f445aabd43eb0ff3a8a557 2024-11-17T22:49:59,485 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/hbase/meta/1588230740/.tmp/info/31f81bf59d634af3a79b4e5aa5b93295 as hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/hbase/meta/1588230740/info/31f81bf59d634af3a79b4e5aa5b93295 2024-11-17T22:49:59,490 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/hbase/meta/1588230740/info/31f81bf59d634af3a79b4e5aa5b93295, entries=10, sequenceid=11, filesize=7.1 K 2024-11-17T22:49:59,491 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/hbase/meta/1588230740/.tmp/ns/3e6a96810db841babf80cd3e306bf5dc as hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/hbase/meta/1588230740/ns/3e6a96810db841babf80cd3e306bf5dc 2024-11-17T22:49:59,497 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/hbase/meta/1588230740/ns/3e6a96810db841babf80cd3e306bf5dc, entries=2, sequenceid=11, filesize=5.0 K 2024-11-17T22:49:59,498 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/hbase/meta/1588230740/.tmp/table/5dbaaa4a16f445aabd43eb0ff3a8a557 as hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/hbase/meta/1588230740/table/5dbaaa4a16f445aabd43eb0ff3a8a557 2024-11-17T22:49:59,505 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/hbase/meta/1588230740/table/5dbaaa4a16f445aabd43eb0ff3a8a557, entries=2, sequenceid=11, filesize=5.4 K 2024-11-17T22:49:59,506 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 125ms, sequenceid=11, compaction requested=false 2024-11-17T22:49:59,511 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-17T22:49:59,512 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T22:49:59,512 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T22:49:59,512 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731883799381Running coprocessor pre-close hooks at 1731883799381Disabling compacts and flushes for region at 1731883799381Disabling writes for close at 1731883799381Obtaining lock to block concurrent updates at 1731883799381Preparing flush snapshotting stores in 1588230740 at 1731883799381Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731883799382 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731883799383 (+1 ms)Flushing 1588230740/info: creating writer at 1731883799383Flushing 1588230740/info: appending metadata at 1731883799403 (+20 ms)Flushing 1588230740/info: closing flushed file at 1731883799403Flushing 1588230740/ns: creating writer at 1731883799421 (+18 ms)Flushing 1588230740/ns: appending metadata at 1731883799443 (+22 ms)Flushing 1588230740/ns: closing flushed file at 1731883799443Flushing 1588230740/table: creating writer at 1731883799456 (+13 ms)Flushing 1588230740/table: appending metadata at 1731883799474 (+18 ms)Flushing 1588230740/table: closing flushed file at 1731883799474Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@48535e33: reopening flushed file at 1731883799485 (+11 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@45745298: reopening flushed file at 1731883799490 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7f374aa5: reopening flushed file at 1731883799497 (+7 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 125ms, sequenceid=11, compaction requested=false at 1731883799506 (+9 ms)Writing region close event to WAL at 1731883799507 (+1 ms)Running coprocessor post-close hooks at 1731883799512 (+5 ms)Closed at 1731883799512 2024-11-17T22:49:59,512 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-17T22:49:59,548 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:49:59,581 INFO [RS:0;1a6e40b21a48:46745 {}] regionserver.HRegionServer(976): stopping server 1a6e40b21a48,46745,1731883747876; all regions closed. 
2024-11-17T22:49:59,582 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T22:49:59,582 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T22:49:59,582 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T22:49:59,582 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T22:49:59,582 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T22:49:59,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741834_1010 (size=3306)
2024-11-17T22:49:59,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37837 is added to blk_1073741834_1010 (size=3306)
2024-11-17T22:49:59,587 DEBUG [RS:0;1a6e40b21a48:46745 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/oldWALs
2024-11-17T22:49:59,587 INFO [RS:0;1a6e40b21a48:46745 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1a6e40b21a48%2C46745%2C1731883747876.meta:.meta(num 1731883748773)
2024-11-17T22:49:59,587 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T22:49:59,588 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T22:49:59,588 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T22:49:59,588 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T22:49:59,588 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T22:49:59,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741844_1020 (size=1252)
2024-11-17T22:49:59,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37837 is added to blk_1073741844_1020 (size=1252)
2024-11-17T22:49:59,593 DEBUG [RS:0;1a6e40b21a48:46745 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/oldWALs
2024-11-17T22:49:59,593 INFO [RS:0;1a6e40b21a48:46745 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1a6e40b21a48%2C46745%2C1731883747876:(num 1731883799364)
2024-11-17T22:49:59,593 DEBUG [RS:0;1a6e40b21a48:46745 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-17T22:49:59,593 INFO [RS:0;1a6e40b21a48:46745 {}] regionserver.LeaseManager(133): Closed leases
2024-11-17T22:49:59,593 INFO [RS:0;1a6e40b21a48:46745 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-17T22:49:59,594 INFO [RS:0;1a6e40b21a48:46745 {}] hbase.ChoreService(370): Chore service for: regionserver/1a6e40b21a48:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-11-17T22:49:59,594 INFO [RS:0;1a6e40b21a48:46745 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-17T22:49:59,594 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-17T22:49:59,594 INFO [RS:0;1a6e40b21a48:46745 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46745
2024-11-17T22:49:59,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45037-0x1004fe026540000, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-17T22:49:59,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46745-0x1004fe026540001, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1a6e40b21a48,46745,1731883747876
2024-11-17T22:49:59,596 INFO [RS:0;1a6e40b21a48:46745 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-17T22:49:59,596 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1a6e40b21a48,46745,1731883747876]
2024-11-17T22:49:59,596 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher.
java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$362/0x00007fb240902b70@22de074 rejected from java.util.concurrent.ThreadPoolExecutor@472f6e55[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 14]
    at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?]
    at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?]
    at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4]
    at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4]
2024-11-17T22:49:59,597 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1a6e40b21a48,46745,1731883747876 already deleted, retry=false
2024-11-17T22:49:59,598 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1a6e40b21a48,46745,1731883747876 expired; onlineServers=0
2024-11-17T22:49:59,598 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '1a6e40b21a48,45037,1731883747833' *****
2024-11-17T22:49:59,598 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-11-17T22:49:59,598 INFO [M:0;1a6e40b21a48:45037 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-17T22:49:59,598 INFO [M:0;1a6e40b21a48:45037 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-17T22:49:59,598 DEBUG [M:0;1a6e40b21a48:45037 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-11-17T22:49:59,598 DEBUG [M:0;1a6e40b21a48:45037 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-17T22:49:59,598 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-11-17T22:49:59,598 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.large.0-1731883748010 {}] cleaner.HFileCleaner(306): Exit Thread[master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.large.0-1731883748010,5,FailOnTimeoutGroup] 2024-11-17T22:49:59,598 INFO [M:0;1a6e40b21a48:45037 {}] hbase.ChoreService(370): Chore service for: master/1a6e40b21a48:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-17T22:49:59,598 INFO [M:0;1a6e40b21a48:45037 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T22:49:59,598 DEBUG [M:0;1a6e40b21a48:45037 {}] master.HMaster(1795): Stopping service threads 2024-11-17T22:49:59,598 INFO [M:0;1a6e40b21a48:45037 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-17T22:49:59,598 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.small.0-1731883748010 {}] cleaner.HFileCleaner(306): Exit Thread[master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.small.0-1731883748010,5,FailOnTimeoutGroup] 2024-11-17T22:49:59,598 INFO [M:0;1a6e40b21a48:45037 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T22:49:59,599 INFO [M:0;1a6e40b21a48:45037 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-17T22:49:59,599 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-17T22:49:59,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45037-0x1004fe026540000, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-17T22:49:59,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45037-0x1004fe026540000, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:49:59,600 DEBUG [M:0;1a6e40b21a48:45037 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-17T22:49:59,600 DEBUG [M:0;1a6e40b21a48:45037 {}] master.ActiveMasterManager(353): master:45037-0x1004fe026540000, quorum=127.0.0.1:59563, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-17T22:49:59,601 INFO [M:0;1a6e40b21a48:45037 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/.lastflushedseqids 2024-11-17T22:49:59,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37837 is added to blk_1073741849_1025 (size=130) 2024-11-17T22:49:59,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741849_1025 (size=130) 2024-11-17T22:49:59,618 INFO [M:0;1a6e40b21a48:45037 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-17T22:49:59,618 INFO [M:0;1a6e40b21a48:45037 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-17T22:49:59,618 DEBUG [M:0;1a6e40b21a48:45037 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T22:49:59,618 INFO [M:0;1a6e40b21a48:45037 {}] regionserver.HRegion(1755): Closing region 
master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:49:59,618 DEBUG [M:0;1a6e40b21a48:45037 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:49:59,618 DEBUG [M:0;1a6e40b21a48:45037 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T22:49:59,618 DEBUG [M:0;1a6e40b21a48:45037 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:49:59,618 INFO [M:0;1a6e40b21a48:45037 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.60 KB heapSize=55.01 KB 2024-11-17T22:49:59,645 DEBUG [M:0;1a6e40b21a48:45037 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7e0ca193630e4acf87cdc19e9c177bb9 is 82, key is hbase:meta,,1/info:regioninfo/1731883748814/Put/seqid=0 2024-11-17T22:49:59,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741850_1026 (size=5672) 2024-11-17T22:49:59,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37837 is added to blk_1073741850_1026 (size=5672) 2024-11-17T22:49:59,656 INFO [M:0;1a6e40b21a48:45037 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7e0ca193630e4acf87cdc19e9c177bb9 2024-11-17T22:49:59,686 DEBUG [M:0;1a6e40b21a48:45037 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9923207a8aaf4c449033f81dff874c7b is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731883749301/Put/seqid=0 2024-11-17T22:49:59,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37837 is added to blk_1073741851_1027 (size=7824) 2024-11-17T22:49:59,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741851_1027 (size=7824) 2024-11-17T22:49:59,693 INFO [M:0;1a6e40b21a48:45037 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=43.00 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9923207a8aaf4c449033f81dff874c7b 2024-11-17T22:49:59,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46745-0x1004fe026540001, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T22:49:59,697 INFO [RS:0;1a6e40b21a48:46745 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T22:49:59,697 INFO [RS:0;1a6e40b21a48:46745 {}] regionserver.HRegionServer(1031): Exiting; stopping=1a6e40b21a48,46745,1731883747876; zookeeper connection closed. 
2024-11-17T22:49:59,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46745-0x1004fe026540001, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T22:49:59,698 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2bc8d10e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2bc8d10e 2024-11-17T22:49:59,698 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-17T22:49:59,698 INFO [M:0;1a6e40b21a48:45037 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9923207a8aaf4c449033f81dff874c7b 2024-11-17T22:49:59,713 DEBUG [M:0;1a6e40b21a48:45037 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/61f48dade57744c19deaca3313e788cd is 69, key is 1a6e40b21a48,46745,1731883747876/rs:state/1731883748112/Put/seqid=0 2024-11-17T22:49:59,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37837 is added to blk_1073741852_1028 (size=5156) 2024-11-17T22:49:59,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741852_1028 (size=5156) 2024-11-17T22:49:59,719 INFO [M:0;1a6e40b21a48:45037 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/61f48dade57744c19deaca3313e788cd 2024-11-17T22:49:59,739 DEBUG [M:0;1a6e40b21a48:45037 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9f4d6b49680c49149d9db6f1f53a5dfa is 52, key is load_balancer_on/state:d/1731883748906/Put/seqid=0 2024-11-17T22:49:59,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37837 is added to blk_1073741853_1029 (size=5056) 2024-11-17T22:49:59,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741853_1029 (size=5056) 2024-11-17T22:49:59,745 INFO [M:0;1a6e40b21a48:45037 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9f4d6b49680c49149d9db6f1f53a5dfa 2024-11-17T22:49:59,751 DEBUG [M:0;1a6e40b21a48:45037 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7e0ca193630e4acf87cdc19e9c177bb9 as hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7e0ca193630e4acf87cdc19e9c177bb9 2024-11-17T22:49:59,758 INFO [M:0;1a6e40b21a48:45037 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7e0ca193630e4acf87cdc19e9c177bb9, entries=8, sequenceid=121, filesize=5.5 K 2024-11-17T22:49:59,759 DEBUG [M:0;1a6e40b21a48:45037 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9923207a8aaf4c449033f81dff874c7b as hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9923207a8aaf4c449033f81dff874c7b 2024-11-17T22:49:59,765 INFO [M:0;1a6e40b21a48:45037 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9923207a8aaf4c449033f81dff874c7b 2024-11-17T22:49:59,765 INFO [M:0;1a6e40b21a48:45037 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9923207a8aaf4c449033f81dff874c7b, entries=14, sequenceid=121, filesize=7.6 K 2024-11-17T22:49:59,767 DEBUG [M:0;1a6e40b21a48:45037 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/61f48dade57744c19deaca3313e788cd as hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/61f48dade57744c19deaca3313e788cd 2024-11-17T22:49:59,772 INFO [M:0;1a6e40b21a48:45037 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/61f48dade57744c19deaca3313e788cd, entries=1, sequenceid=121, filesize=5.0 K 2024-11-17T22:49:59,774 DEBUG [M:0;1a6e40b21a48:45037 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9f4d6b49680c49149d9db6f1f53a5dfa as hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9f4d6b49680c49149d9db6f1f53a5dfa 2024-11-17T22:49:59,778 INFO [M:0;1a6e40b21a48:45037 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37191/user/jenkins/test-data/0c9e6ceb-376f-5cbd-b7a8-3b80351cb6b2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9f4d6b49680c49149d9db6f1f53a5dfa, entries=1, sequenceid=121, filesize=4.9 K 2024-11-17T22:49:59,780 INFO [M:0;1a6e40b21a48:45037 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.60 KB/44647, heapSize ~54.95 KB/56264, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 162ms, sequenceid=121, compaction requested=false 2024-11-17T22:49:59,791 INFO [M:0;1a6e40b21a48:45037 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-17T22:49:59,791 DEBUG [M:0;1a6e40b21a48:45037 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731883799618Disabling compacts and flushes for region at 1731883799618Disabling writes for close at 1731883799618Obtaining lock to block concurrent updates at 1731883799618Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731883799618Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44647, getHeapSize=56264, getOffHeapSize=0, getCellsCount=140 at 1731883799619 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731883799620 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731883799620Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731883799644 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731883799644Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731883799663 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731883799685 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731883799685Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731883799699 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731883799713 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731883799713Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731883799724 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731883799739 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731883799739Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2eca64d7: reopening flushed file at 1731883799750 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2dd574a2: reopening flushed file at 1731883799758 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4711252b: reopening flushed file at 1731883799766 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f9e7c18: reopening flushed file at 1731883799773 (+7 ms)Finished flush of dataSize ~43.60 KB/44647, heapSize ~54.95 KB/56264, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 162ms, sequenceid=121, compaction requested=false at 1731883799780 (+7 ms)Writing region close event to WAL at 1731883799791 (+11 ms)Closed at 1731883799791 2024-11-17T22:49:59,792 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:49:59,792 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:49:59,792 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:49:59,792 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:49:59,792 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:49:59,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37837 is added to blk_1073741830_1006 (size=53044) 2024-11-17T22:49:59,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741830_1006 (size=53044) 2024-11-17T22:49:59,795 INFO [M:0;1a6e40b21a48:45037 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-17T22:49:59,795 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-17T22:49:59,795 INFO [M:0;1a6e40b21a48:45037 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45037
2024-11-17T22:49:59,795 INFO [M:0;1a6e40b21a48:45037 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-17T22:49:59,897 INFO [M:0;1a6e40b21a48:45037 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-17T22:49:59,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45037-0x1004fe026540000, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-17T22:49:59,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45037-0x1004fe026540000, quorum=127.0.0.1:59563, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-17T22:49:59,904 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@188d3e33{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-17T22:49:59,904 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6da95783{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-17T22:49:59,904 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-17T22:49:59,905 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3fdc15a6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-17T22:49:59,905 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@67c2b9b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/hadoop.log.dir/,STOPPED}
2024-11-17T22:49:59,906 WARN [BP-1985597313-172.17.0.2-1731883747274 heartbeating to localhost/127.0.0.1:37191 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-17T22:49:59,907 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-17T22:49:59,907 WARN [BP-1985597313-172.17.0.2-1731883747274 heartbeating to localhost/127.0.0.1:37191 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1985597313-172.17.0.2-1731883747274 (Datanode Uuid 6c064ac7-22ca-4ac2-87f4-8703415cf23a) service to localhost/127.0.0.1:37191
2024-11-17T22:49:59,907 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-17T22:49:59,907 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/cluster_8302175d-06ac-9ab8-268d-7043ad67e963/data/data3/current/BP-1985597313-172.17.0.2-1731883747274 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-17T22:49:59,907 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/cluster_8302175d-06ac-9ab8-268d-7043ad67e963/data/data4/current/BP-1985597313-172.17.0.2-1731883747274 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-17T22:49:59,908 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-17T22:49:59,910 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3b4ea813{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-17T22:49:59,910 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@30c71845{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-17T22:49:59,910 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-17T22:49:59,910 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47ce5971{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-17T22:49:59,910 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a5f2a0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/hadoop.log.dir/,STOPPED}
2024-11-17T22:49:59,912 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T22:49:59,912 WARN [BP-1985597313-172.17.0.2-1731883747274 heartbeating to localhost/127.0.0.1:37191 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1985597313-172.17.0.2-1731883747274 (Datanode Uuid c9a30852-542f-466a-859b-89763d27d7ea) service to localhost/127.0.0.1:37191 2024-11-17T22:49:59,912 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/cluster_8302175d-06ac-9ab8-268d-7043ad67e963/data/data1/current/BP-1985597313-172.17.0.2-1731883747274 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:49:59,913 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/cluster_8302175d-06ac-9ab8-268d-7043ad67e963/data/data2/current/BP-1985597313-172.17.0.2-1731883747274 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:49:59,913 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T22:49:59,920 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@15cd018{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T22:49:59,920 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@35b0b5b4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T22:49:59,920 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T22:49:59,920 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4761886e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T22:49:59,921 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7638bdc6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/hadoop.log.dir/,STOPPED} 2024-11-17T22:49:59,927 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-17T22:49:59,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-17T22:49:59,963 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=206 (was 180) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37191 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37191 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:37191 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37191 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37191 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:37191 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: regionserver/1a6e40b21a48:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:37191 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:37191 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=485 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=60 (was 69), ProcessCount=11 (was 11), AvailableMemoryMB=5130 (was 4105) - AvailableMemoryMB LEAK? 
- 2024-11-17T22:49:59,974 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=206, OpenFileDescriptor=485, MaxFileDescriptor=1048576, SystemLoadAverage=60, ProcessCount=11, AvailableMemoryMB=5128 2024-11-17T22:49:59,974 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-17T22:49:59,974 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/hadoop.log.dir so I do NOT create it in target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86 2024-11-17T22:49:59,974 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aee07e74-aae9-5697-b8ca-5413984cc1b2/hadoop.tmp.dir so I do NOT create it in target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86 2024-11-17T22:49:59,974 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/cluster_7e2e05da-ae2c-c550-d8c9-e44748f5b97d, deleteOnExit=true 2024-11-17T22:49:59,974 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-17T22:49:59,975 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/test.cache.data in system properties and HBase conf 2024-11-17T22:49:59,975 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/hadoop.tmp.dir in system properties and HBase conf 2024-11-17T22:49:59,975 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/hadoop.log.dir in system properties and HBase conf 2024-11-17T22:49:59,975 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-17T22:49:59,975 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-17T22:49:59,975 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-17T22:49:59,975 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-17T22:49:59,975 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-17T22:49:59,975 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-17T22:49:59,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-17T22:49:59,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T22:49:59,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-17T22:49:59,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-17T22:49:59,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T22:49:59,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T22:49:59,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-17T22:49:59,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/nfs.dump.dir in system properties and HBase conf 2024-11-17T22:49:59,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/java.io.tmpdir in system properties and HBase conf 2024-11-17T22:49:59,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T22:49:59,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-17T22:49:59,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-17T22:49:59,995 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T22:50:00,055 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:50:00,059 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T22:50:00,060 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T22:50:00,060 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T22:50:00,060 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T22:50:00,061 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:50:00,065 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10ce7a76{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/hadoop.log.dir/,AVAILABLE} 2024-11-17T22:50:00,065 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@56433553{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T22:50:00,132 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:00,132 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:00,149 INFO [regionserver/1a6e40b21a48:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T22:50:00,162 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@52d230c9{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/java.io.tmpdir/jetty-localhost-37989-hadoop-hdfs-3_4_1-tests_jar-_-any-2009802116079100788/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T22:50:00,163 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@240fc28c{HTTP/1.1, (http/1.1)}{localhost:37989} 2024-11-17T22:50:00,163 INFO [Time-limited test {}] server.Server(415): Started @241187ms 2024-11-17T22:50:00,176 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T22:50:00,235 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:50:00,238 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T22:50:00,242 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T22:50:00,243 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T22:50:00,243 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T22:50:00,243 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a48d3d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/hadoop.log.dir/,AVAILABLE} 2024-11-17T22:50:00,244 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@53cff5cb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T22:50:00,342 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@45890504{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/java.io.tmpdir/jetty-localhost-36531-hadoop-hdfs-3_4_1-tests_jar-_-any-15654133930587060229/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:50:00,343 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3d639fc0{HTTP/1.1, (http/1.1)}{localhost:36531} 2024-11-17T22:50:00,343 INFO [Time-limited test {}] server.Server(415): Started @241367ms 2024-11-17T22:50:00,344 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T22:50:00,390 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:50:00,394 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T22:50:00,396 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T22:50:00,396 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T22:50:00,396 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T22:50:00,397 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7305dd28{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/hadoop.log.dir/,AVAILABLE} 2024-11-17T22:50:00,397 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1dab95de{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T22:50:00,415 WARN [Thread-1955 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/cluster_7e2e05da-ae2c-c550-d8c9-e44748f5b97d/data/data1/current/BP-1786302858-172.17.0.2-1731883800009/current, will proceed with Du for space computation calculation, 2024-11-17T22:50:00,415 WARN [Thread-1956 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/cluster_7e2e05da-ae2c-c550-d8c9-e44748f5b97d/data/data2/current/BP-1786302858-172.17.0.2-1731883800009/current, will proceed with Du for space computation calculation, 2024-11-17T22:50:00,437 WARN [Thread-1934 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T22:50:00,442 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf5fb5f8ab77422fa with lease ID 0xf4039bb0b8e5a44b: Processing first storage report for DS-325b6be0-193e-4e01-986d-9bbe56467870 from datanode DatanodeRegistration(127.0.0.1:41973, datanodeUuid=4d2c2aa8-b30f-44ed-9614-ff46a008aa86, infoPort=43219, infoSecurePort=0, ipcPort=38371, storageInfo=lv=-57;cid=testClusterID;nsid=731122339;c=1731883800009) 2024-11-17T22:50:00,442 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf5fb5f8ab77422fa with lease ID 0xf4039bb0b8e5a44b: from storage DS-325b6be0-193e-4e01-986d-9bbe56467870 node DatanodeRegistration(127.0.0.1:41973, datanodeUuid=4d2c2aa8-b30f-44ed-9614-ff46a008aa86, infoPort=43219, infoSecurePort=0, ipcPort=38371, storageInfo=lv=-57;cid=testClusterID;nsid=731122339;c=1731883800009), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:50:00,442 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf5fb5f8ab77422fa with lease ID 0xf4039bb0b8e5a44b: Processing first storage report for DS-9a81012b-e13e-4fed-a435-a3b7e3a57dde from datanode DatanodeRegistration(127.0.0.1:41973, datanodeUuid=4d2c2aa8-b30f-44ed-9614-ff46a008aa86, infoPort=43219, infoSecurePort=0, ipcPort=38371, storageInfo=lv=-57;cid=testClusterID;nsid=731122339;c=1731883800009) 2024-11-17T22:50:00,442 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf5fb5f8ab77422fa with lease ID 0xf4039bb0b8e5a44b: from storage DS-9a81012b-e13e-4fed-a435-a3b7e3a57dde node DatanodeRegistration(127.0.0.1:41973, datanodeUuid=4d2c2aa8-b30f-44ed-9614-ff46a008aa86, infoPort=43219, infoSecurePort=0, ipcPort=38371, storageInfo=lv=-57;cid=testClusterID;nsid=731122339;c=1731883800009), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:50:00,501 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@9612b29{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/java.io.tmpdir/jetty-localhost-37827-hadoop-hdfs-3_4_1-tests_jar-_-any-14923769115660186836/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:50:00,502 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@314e7370{HTTP/1.1, (http/1.1)}{localhost:37827} 2024-11-17T22:50:00,502 INFO [Time-limited test {}] server.Server(415): Started @241526ms 2024-11-17T22:50:00,503 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T22:50:00,548 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:00,567 WARN [Thread-1982 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/cluster_7e2e05da-ae2c-c550-d8c9-e44748f5b97d/data/data4/current/BP-1786302858-172.17.0.2-1731883800009/current, will proceed with Du for space computation calculation, 2024-11-17T22:50:00,567 WARN [Thread-1981 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/cluster_7e2e05da-ae2c-c550-d8c9-e44748f5b97d/data/data3/current/BP-1786302858-172.17.0.2-1731883800009/current, will proceed with Du for space computation calculation, 2024-11-17T22:50:00,587 WARN [Thread-1970 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T22:50:00,589 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3cd7d132da644119 with lease ID 0xf4039bb0b8e5a44c: Processing first storage report for DS-cabd2033-3b45-458f-ae19-5d5f439fc85c from datanode DatanodeRegistration(127.0.0.1:34851, datanodeUuid=c74d30f6-3bc6-4903-89ca-5f82b0017218, infoPort=37525, infoSecurePort=0, ipcPort=42895, storageInfo=lv=-57;cid=testClusterID;nsid=731122339;c=1731883800009) 2024-11-17T22:50:00,589 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3cd7d132da644119 with lease ID 0xf4039bb0b8e5a44c: from storage DS-cabd2033-3b45-458f-ae19-5d5f439fc85c node DatanodeRegistration(127.0.0.1:34851, datanodeUuid=c74d30f6-3bc6-4903-89ca-5f82b0017218, infoPort=37525, infoSecurePort=0, ipcPort=42895, storageInfo=lv=-57;cid=testClusterID;nsid=731122339;c=1731883800009), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:50:00,589 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3cd7d132da644119 with lease ID 0xf4039bb0b8e5a44c: Processing first storage report for DS-69cdd905-7c98-4188-bb77-ddb7640ee9ab from datanode DatanodeRegistration(127.0.0.1:34851, datanodeUuid=c74d30f6-3bc6-4903-89ca-5f82b0017218, infoPort=37525, infoSecurePort=0, ipcPort=42895, storageInfo=lv=-57;cid=testClusterID;nsid=731122339;c=1731883800009) 2024-11-17T22:50:00,589 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3cd7d132da644119 with lease ID 0xf4039bb0b8e5a44c: from storage DS-69cdd905-7c98-4188-bb77-ddb7640ee9ab node DatanodeRegistration(127.0.0.1:34851, datanodeUuid=c74d30f6-3bc6-4903-89ca-5f82b0017218, infoPort=37525, infoSecurePort=0, ipcPort=42895, storageInfo=lv=-57;cid=testClusterID;nsid=731122339;c=1731883800009), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:50:00,627 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86 2024-11-17T22:50:00,631 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/cluster_7e2e05da-ae2c-c550-d8c9-e44748f5b97d/zookeeper_0, clientPort=59694, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/cluster_7e2e05da-ae2c-c550-d8c9-e44748f5b97d/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/cluster_7e2e05da-ae2c-c550-d8c9-e44748f5b97d/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-17T22:50:00,632 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59694 2024-11-17T22:50:00,632 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:50:00,634 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:50:00,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741825_1001 (size=7) 2024-11-17T22:50:00,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741825_1001 (size=7) 2024-11-17T22:50:00,647 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e with version=8 2024-11-17T22:50:00,647 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/hbase-staging 2024-11-17T22:50:00,649 INFO [Time-limited test {}] client.ConnectionUtils(128): master/1a6e40b21a48:0 server-side Connection retries=45 2024-11-17T22:50:00,649 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T22:50:00,649 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T22:50:00,649 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T22:50:00,649 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T22:50:00,649 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T22:50:00,649 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-17T22:50:00,649 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T22:50:00,650 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46683 2024-11-17T22:50:00,651 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46683 connecting to ZooKeeper ensemble=127.0.0.1:59694 2024-11-17T22:50:00,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:466830x0, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T22:50:00,655 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46683-0x1004fe0f4a40000 connected 2024-11-17T22:50:00,676 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:50:00,678 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:50:00,683 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46683-0x1004fe0f4a40000, quorum=127.0.0.1:59694, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T22:50:00,683 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e, hbase.cluster.distributed=false 2024-11-17T22:50:00,685 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46683-0x1004fe0f4a40000, quorum=127.0.0.1:59694, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T22:50:00,689 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46683 2024-11-17T22:50:00,690 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46683 2024-11-17T22:50:00,690 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46683 2024-11-17T22:50:00,691 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46683 2024-11-17T22:50:00,691 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46683 2024-11-17T22:50:00,707 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1a6e40b21a48:0 server-side Connection retries=45 2024-11-17T22:50:00,707 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T22:50:00,707 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T22:50:00,707 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T22:50:00,707 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T22:50:00,707 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T22:50:00,707 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-17T22:50:00,707 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T22:50:00,708 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35999 2024-11-17T22:50:00,710 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35999 connecting to ZooKeeper ensemble=127.0.0.1:59694 2024-11-17T22:50:00,711 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:50:00,714 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:50:00,719 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:359990x0, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T22:50:00,719 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:359990x0, quorum=127.0.0.1:59694, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T22:50:00,720 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-17T22:50:00,721 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35999-0x1004fe0f4a40001 connected 2024-11-17T22:50:00,728 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-17T22:50:00,728 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35999-0x1004fe0f4a40001, quorum=127.0.0.1:59694, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-17T22:50:00,729 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35999-0x1004fe0f4a40001, quorum=127.0.0.1:59694, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T22:50:00,736 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35999 2024-11-17T22:50:00,737 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35999 2024-11-17T22:50:00,741 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35999 2024-11-17T22:50:00,741 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35999 2024-11-17T22:50:00,742 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35999 2024-11-17T22:50:00,753 DEBUG [M:0;1a6e40b21a48:46683 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;1a6e40b21a48:46683 2024-11-17T22:50:00,753 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/1a6e40b21a48,46683,1731883800648 2024-11-17T22:50:00,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35999-0x1004fe0f4a40001, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T22:50:00,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46683-0x1004fe0f4a40000, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T22:50:00,755 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46683-0x1004fe0f4a40000, quorum=127.0.0.1:59694, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/1a6e40b21a48,46683,1731883800648 2024-11-17T22:50:00,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35999-0x1004fe0f4a40001, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-17T22:50:00,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35999-0x1004fe0f4a40001, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:50:00,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46683-0x1004fe0f4a40000, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:50:00,756 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46683-0x1004fe0f4a40000, quorum=127.0.0.1:59694, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-17T22:50:00,756 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/1a6e40b21a48,46683,1731883800648 from backup master directory 2024-11-17T22:50:00,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35999-0x1004fe0f4a40001, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T22:50:00,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46683-0x1004fe0f4a40000, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/1a6e40b21a48,46683,1731883800648 2024-11-17T22:50:00,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46683-0x1004fe0f4a40000, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T22:50:00,757 WARN [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
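Note on the ZooKeeper traffic above: the repeated "Set watcher on znode that does not yet exist" entries come from ZooKeeper's exists() call, which registers a watch even when the target znode (for example /hbase/running or /hbase/master) is absent, so the client is notified once the node is created. The following minimal Java sketch illustrates only that mechanism; the connection string, session timeout, and sleep are illustrative assumptions, not values or code taken from HBase's own ZKWatcher.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;

public class ZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    // Connect to a ZooKeeper ensemble (address and timeout are illustrative).
    ZooKeeper zk = new ZooKeeper("127.0.0.1:59694", 30000, (WatchedEvent event) ->
        // Fires for connection state changes and for watched znode events,
        // e.g. NodeCreated on /hbase/running once the node appears.
        System.out.println("event type=" + event.getType()
            + " state=" + event.getState() + " path=" + event.getPath()));
    // exists() with watch=true registers the watch even though the znode is
    // not there yet -- the behaviour logged as "Set watcher on znode that
    // does not yet exist".
    zk.exists("/hbase/running", true);
    Thread.sleep(5_000); // keep the session open long enough to observe events
    zk.close();
  }
}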
2024-11-17T22:50:00,757 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=1a6e40b21a48,46683,1731883800648 2024-11-17T22:50:00,762 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/hbase.id] with ID: 06c9ab33-52f1-4273-acf6-3c3144450d26 2024-11-17T22:50:00,762 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/.tmp/hbase.id 2024-11-17T22:50:00,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741826_1002 (size=42) 2024-11-17T22:50:00,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741826_1002 (size=42) 2024-11-17T22:50:00,772 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/.tmp/hbase.id]:[hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/hbase.id] 2024-11-17T22:50:00,784 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:50:00,784 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-17T22:50:00,786 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
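The cluster ID creation above (write the ID to .tmp/hbase.id, then move it to hbase.id) follows the usual write-to-temporary-then-rename pattern, so readers never observe a partially written file. Below is a minimal sketch of that pattern with the Hadoop FileSystem API; the local /tmp paths and the default Configuration are assumptions for illustration, not the HDFS locations used in this run.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFileSketch {
  public static void main(String[] args) throws Exception {
    // A default Configuration resolves to the local filesystem here; in the
    // test run above the equivalent calls go against the mini-cluster's HDFS.
    FileSystem fs = FileSystem.get(new Configuration());
    Path tmp = new Path("/tmp/hbase-id-demo/.tmp/hbase.id"); // temporary location
    Path target = new Path("/tmp/hbase-id-demo/hbase.id");   // final location
    fs.mkdirs(tmp.getParent());
    // Step 1: write the ID to the temporary file.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("06c9ab33-52f1-4273-acf6-3c3144450d26".getBytes(StandardCharsets.UTF_8));
    }
    // Step 2: move it into place in one step, so the final path is either
    // absent or complete, never half-written.
    if (!fs.rename(tmp, target)) {
      throw new java.io.IOException("rename failed: " + tmp + " -> " + target);
    }
  }
}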
2024-11-17T22:50:00,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46683-0x1004fe0f4a40000, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:50:00,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35999-0x1004fe0f4a40001, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:50:00,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741827_1003 (size=196) 2024-11-17T22:50:00,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741827_1003 (size=196) 2024-11-17T22:50:00,807 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T22:50:00,808 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-17T22:50:00,808 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T22:50:00,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741828_1004 (size=1189) 2024-11-17T22:50:00,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741828_1004 (size=1189) 2024-11-17T22:50:01,132 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:01,132 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:01,221 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/MasterData/data/master/store 2024-11-17T22:50:01,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741829_1005 (size=34) 2024-11-17T22:50:01,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741829_1005 (size=34) 2024-11-17T22:50:01,234 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:50:01,234 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T22:50:01,234 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
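The 'master:store' descriptor logged above lists per-column-family attributes (VERSIONS, IN_MEMORY, DATA_BLOCK_ENCODING, BLOOMFILTER, BLOCKSIZE, and so on). For a user table, an equivalent descriptor can be assembled with the HBase client builder API; the sketch below mirrors the 'info' family's attributes from the log, while the table name and the omission of the other families are illustrative assumptions rather than a reproduction of the internal master:store definition, which is created by the master itself.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
  public static void main(String[] args) {
    // Column family mirroring the 'info' attributes in the logged descriptor:
    // VERSIONS => 3, IN_MEMORY => true, DATA_BLOCK_ENCODING => ROW_INDEX_V1,
    // BLOOMFILTER => ROWCOL, BLOCKSIZE => 8192.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setBlocksize(8192)
        .build();

    // Table name is hypothetical; it only serves to show the builder calls.
    TableDescriptor table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo_store"))
        .setColumnFamily(info)
        .build();

    System.out.println(table);
  }
}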
2024-11-17T22:50:01,234 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:50:01,234 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T22:50:01,234 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:50:01,234 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:50:01,234 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731883801234Disabling compacts and flushes for region at 1731883801234Disabling writes for close at 1731883801234Writing region close event to WAL at 1731883801234Closed at 1731883801234 2024-11-17T22:50:01,236 WARN [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/MasterData/data/master/store/.initializing 2024-11-17T22:50:01,236 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/MasterData/WALs/1a6e40b21a48,46683,1731883800648 2024-11-17T22:50:01,239 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a6e40b21a48%2C46683%2C1731883800648, suffix=, logDir=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/MasterData/WALs/1a6e40b21a48,46683,1731883800648, archiveDir=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/MasterData/oldWALs, maxLogs=10 2024-11-17T22:50:01,240 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C46683%2C1731883800648.1731883801240 2024-11-17T22:50:01,251 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/MasterData/WALs/1a6e40b21a48,46683,1731883800648/1a6e40b21a48%2C46683%2C1731883800648.1731883801240 2024-11-17T22:50:01,257 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37525:37525),(127.0.0.1/127.0.0.1:43219:43219)] 2024-11-17T22:50:01,261 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-17T22:50:01,262 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:50:01,262 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:50:01,262 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] 
regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:50:01,263 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:50:01,265 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-17T22:50:01,265 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:50:01,265 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:50:01,265 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:50:01,267 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-17T22:50:01,267 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:50:01,267 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T22:50:01,267 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:50:01,268 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-17T22:50:01,268 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:50:01,269 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T22:50:01,269 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:50:01,270 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-17T22:50:01,270 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:50:01,271 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T22:50:01,271 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:50:01,272 DEBUG 
[master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:50:01,272 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:50:01,273 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:50:01,273 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:50:01,274 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-17T22:50:01,275 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:50:01,277 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T22:50:01,278 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=795931, jitterRate=0.012078672647476196}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-17T22:50:01,278 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731883801262Initializing all the Stores at 1731883801263 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883801263Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883801263Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883801263Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL 
=> 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883801263Cleaning up temporary data from old regions at 1731883801273 (+10 ms)Region opened successfully at 1731883801278 (+5 ms) 2024-11-17T22:50:01,281 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-17T22:50:01,284 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2dfd555e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a6e40b21a48/172.17.0.2:0 2024-11-17T22:50:01,285 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-17T22:50:01,285 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-17T22:50:01,285 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-17T22:50:01,285 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-17T22:50:01,286 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-17T22:50:01,286 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-17T22:50:01,286 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-17T22:50:01,291 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-11-17T22:50:01,291 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46683-0x1004fe0f4a40000, quorum=127.0.0.1:59694, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-17T22:50:01,292 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-17T22:50:01,293 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-17T22:50:01,293 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46683-0x1004fe0f4a40000, quorum=127.0.0.1:59694, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-17T22:50:01,294 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-17T22:50:01,294 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-17T22:50:01,295 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46683-0x1004fe0f4a40000, quorum=127.0.0.1:59694, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-17T22:50:01,296 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-17T22:50:01,297 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46683-0x1004fe0f4a40000, quorum=127.0.0.1:59694, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-17T22:50:01,298 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-17T22:50:01,300 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46683-0x1004fe0f4a40000, quorum=127.0.0.1:59694, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-17T22:50:01,301 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-17T22:50:01,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46683-0x1004fe0f4a40000, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T22:50:01,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35999-0x1004fe0f4a40001, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T22:50:01,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35999-0x1004fe0f4a40001, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:50:01,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46683-0x1004fe0f4a40000, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-17T22:50:01,303 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=1a6e40b21a48,46683,1731883800648, sessionid=0x1004fe0f4a40000, setting cluster-up flag (Was=false) 2024-11-17T22:50:01,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46683-0x1004fe0f4a40000, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:50:01,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35999-0x1004fe0f4a40001, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:50:01,307 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-17T22:50:01,308 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1a6e40b21a48,46683,1731883800648 2024-11-17T22:50:01,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46683-0x1004fe0f4a40000, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:50:01,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35999-0x1004fe0f4a40001, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:50:01,312 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-17T22:50:01,313 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1a6e40b21a48,46683,1731883800648 2024-11-17T22:50:01,314 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-17T22:50:01,315 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-17T22:50:01,315 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-17T22:50:01,316 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-17T22:50:01,316 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 1a6e40b21a48,46683,1731883800648 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-17T22:50:01,317 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/1a6e40b21a48:0, corePoolSize=5, maxPoolSize=5 2024-11-17T22:50:01,317 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/1a6e40b21a48:0, corePoolSize=5, maxPoolSize=5 2024-11-17T22:50:01,317 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=5, maxPoolSize=5 2024-11-17T22:50:01,317 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=5, maxPoolSize=5 2024-11-17T22:50:01,317 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/1a6e40b21a48:0, corePoolSize=10, maxPoolSize=10 2024-11-17T22:50:01,317 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:50:01,317 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=2, maxPoolSize=2 2024-11-17T22:50:01,317 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:50:01,324 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731883831324 2024-11-17T22:50:01,325 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-17T22:50:01,325 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-17T22:50:01,325 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-17T22:50:01,325 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-17T22:50:01,325 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T22:50:01,325 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-17T22:50:01,325 INFO 
[master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-17T22:50:01,325 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-17T22:50:01,326 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:50:01,326 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-17T22:50:01,333 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-17T22:50:01,333 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-17T22:50:01,334 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-17T22:50:01,334 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-17T22:50:01,334 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-17T22:50:01,334 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-17T22:50:01,337 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.large.0-1731883801334,5,FailOnTimeoutGroup] 2024-11-17T22:50:01,337 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.small.0-1731883801337,5,FailOnTimeoutGroup] 2024-11-17T22:50:01,337 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-17T22:50:01,337 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-17T22:50:01,337 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-17T22:50:01,337 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-17T22:50:01,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741831_1007 (size=1321) 2024-11-17T22:50:01,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741831_1007 (size=1321) 2024-11-17T22:50:01,343 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-17T22:50:01,344 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e 2024-11-17T22:50:01,346 INFO [RS:0;1a6e40b21a48:35999 {}] regionserver.HRegionServer(746): ClusterId : 06c9ab33-52f1-4273-acf6-3c3144450d26 2024-11-17T22:50:01,346 DEBUG [RS:0;1a6e40b21a48:35999 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-17T22:50:01,347 DEBUG [RS:0;1a6e40b21a48:35999 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-17T22:50:01,347 DEBUG [RS:0;1a6e40b21a48:35999 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-17T22:50:01,348 DEBUG [RS:0;1a6e40b21a48:35999 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-17T22:50:01,349 DEBUG [RS:0;1a6e40b21a48:35999 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7604748b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a6e40b21a48/172.17.0.2:0 2024-11-17T22:50:01,358 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741832_1008 (size=32) 2024-11-17T22:50:01,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741832_1008 (size=32) 2024-11-17T22:50:01,358 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:50:01,361 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T22:50:01,362 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T22:50:01,362 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:50:01,363 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:50:01,363 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T22:50:01,364 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T22:50:01,364 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:50:01,364 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:50:01,364 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T22:50:01,365 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T22:50:01,365 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:50:01,366 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:50:01,366 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T22:50:01,366 DEBUG [RS:0;1a6e40b21a48:35999 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;1a6e40b21a48:35999 2024-11-17T22:50:01,366 INFO [RS:0;1a6e40b21a48:35999 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-17T22:50:01,366 INFO [RS:0;1a6e40b21a48:35999 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-17T22:50:01,366 DEBUG [RS:0;1a6e40b21a48:35999 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-17T22:50:01,367 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T22:50:01,367 INFO [RS:0;1a6e40b21a48:35999 {}] regionserver.HRegionServer(2659): reportForDuty to master=1a6e40b21a48,46683,1731883800648 with port=35999, startcode=1731883800706 2024-11-17T22:50:01,367 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:50:01,367 DEBUG [RS:0;1a6e40b21a48:35999 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-17T22:50:01,367 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:50:01,367 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T22:50:01,368 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/hbase/meta/1588230740 2024-11-17T22:50:01,368 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/hbase/meta/1588230740 2024-11-17T22:50:01,369 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45081, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-17T22:50:01,369 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46683 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1a6e40b21a48,35999,1731883800706 2024-11-17T22:50:01,369 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46683 {}] master.ServerManager(517): Registering regionserver=1a6e40b21a48,35999,1731883800706 2024-11-17T22:50:01,371 DEBUG [RS:0;1a6e40b21a48:35999 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e 2024-11-17T22:50:01,371 DEBUG [RS:0;1a6e40b21a48:35999 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46027 2024-11-17T22:50:01,371 DEBUG [RS:0;1a6e40b21a48:35999 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-17T22:50:01,372 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46683-0x1004fe0f4a40000, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T22:50:01,372 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T22:50:01,372 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T22:50:01,373 DEBUG [RS:0;1a6e40b21a48:35999 {}] zookeeper.ZKUtil(111): regionserver:35999-0x1004fe0f4a40001, quorum=127.0.0.1:59694, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1a6e40b21a48,35999,1731883800706 2024-11-17T22:50:01,373 WARN [RS:0;1a6e40b21a48:35999 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-17T22:50:01,373 INFO [RS:0;1a6e40b21a48:35999 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T22:50:01,373 DEBUG [RS:0;1a6e40b21a48:35999 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/WALs/1a6e40b21a48,35999,1731883800706 2024-11-17T22:50:01,373 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-17T22:50:01,374 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T22:50:01,378 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1a6e40b21a48,35999,1731883800706] 2024-11-17T22:50:01,381 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T22:50:01,382 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=787596, jitterRate=0.001480594277381897}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T22:50:01,383 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731883801358Initializing all the Stores at 1731883801359 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883801359Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883801361 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883801361Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS 
=> '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883801361Cleaning up temporary data from old regions at 1731883801372 (+11 ms)Region opened successfully at 1731883801382 (+10 ms) 2024-11-17T22:50:01,383 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T22:50:01,383 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T22:50:01,383 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T22:50:01,383 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T22:50:01,383 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T22:50:01,383 INFO [RS:0;1a6e40b21a48:35999 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-17T22:50:01,383 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T22:50:01,383 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731883801383Disabling compacts and flushes for region at 1731883801383Disabling writes for close at 1731883801383Writing region close event to WAL at 1731883801383Closed at 1731883801383 2024-11-17T22:50:01,384 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T22:50:01,384 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-17T22:50:01,384 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-17T22:50:01,386 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T22:50:01,387 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-17T22:50:01,389 INFO [RS:0;1a6e40b21a48:35999 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-17T22:50:01,393 INFO [RS:0;1a6e40b21a48:35999 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-17T22:50:01,393 INFO [RS:0;1a6e40b21a48:35999 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-17T22:50:01,394 INFO [RS:0;1a6e40b21a48:35999 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-17T22:50:01,395 INFO [RS:0;1a6e40b21a48:35999 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-17T22:50:01,395 INFO [RS:0;1a6e40b21a48:35999 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-17T22:50:01,395 DEBUG [RS:0;1a6e40b21a48:35999 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:50:01,395 DEBUG [RS:0;1a6e40b21a48:35999 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:50:01,395 DEBUG [RS:0;1a6e40b21a48:35999 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:50:01,395 DEBUG [RS:0;1a6e40b21a48:35999 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:50:01,395 DEBUG [RS:0;1a6e40b21a48:35999 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:50:01,395 DEBUG [RS:0;1a6e40b21a48:35999 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1a6e40b21a48:0, corePoolSize=2, maxPoolSize=2 2024-11-17T22:50:01,395 DEBUG [RS:0;1a6e40b21a48:35999 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:50:01,395 DEBUG [RS:0;1a6e40b21a48:35999 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:50:01,395 DEBUG [RS:0;1a6e40b21a48:35999 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:50:01,395 DEBUG [RS:0;1a6e40b21a48:35999 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:50:01,396 DEBUG [RS:0;1a6e40b21a48:35999 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:50:01,396 DEBUG [RS:0;1a6e40b21a48:35999 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:50:01,396 DEBUG [RS:0;1a6e40b21a48:35999 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1a6e40b21a48:0, corePoolSize=3, maxPoolSize=3 2024-11-17T22:50:01,396 DEBUG [RS:0;1a6e40b21a48:35999 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0, corePoolSize=3, maxPoolSize=3 2024-11-17T22:50:01,397 INFO [RS:0;1a6e40b21a48:35999 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-17T22:50:01,397 INFO [RS:0;1a6e40b21a48:35999 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-17T22:50:01,397 INFO [RS:0;1a6e40b21a48:35999 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T22:50:01,397 INFO [RS:0;1a6e40b21a48:35999 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-17T22:50:01,397 INFO [RS:0;1a6e40b21a48:35999 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-17T22:50:01,397 INFO [RS:0;1a6e40b21a48:35999 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,35999,1731883800706-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T22:50:01,414 INFO [RS:0;1a6e40b21a48:35999 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-17T22:50:01,414 INFO [RS:0;1a6e40b21a48:35999 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,35999,1731883800706-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T22:50:01,414 INFO [RS:0;1a6e40b21a48:35999 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:50:01,414 INFO [RS:0;1a6e40b21a48:35999 {}] regionserver.Replication(171): 1a6e40b21a48,35999,1731883800706 started 2024-11-17T22:50:01,432 INFO [RS:0;1a6e40b21a48:35999 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:50:01,432 INFO [RS:0;1a6e40b21a48:35999 {}] regionserver.HRegionServer(1482): Serving as 1a6e40b21a48,35999,1731883800706, RpcServer on 1a6e40b21a48/172.17.0.2:35999, sessionid=0x1004fe0f4a40001 2024-11-17T22:50:01,432 DEBUG [RS:0;1a6e40b21a48:35999 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-17T22:50:01,432 DEBUG [RS:0;1a6e40b21a48:35999 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1a6e40b21a48,35999,1731883800706 2024-11-17T22:50:01,432 DEBUG [RS:0;1a6e40b21a48:35999 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a6e40b21a48,35999,1731883800706' 2024-11-17T22:50:01,432 DEBUG [RS:0;1a6e40b21a48:35999 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-17T22:50:01,433 DEBUG [RS:0;1a6e40b21a48:35999 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-17T22:50:01,434 DEBUG [RS:0;1a6e40b21a48:35999 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-17T22:50:01,434 DEBUG [RS:0;1a6e40b21a48:35999 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-17T22:50:01,434 DEBUG [RS:0;1a6e40b21a48:35999 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1a6e40b21a48,35999,1731883800706 2024-11-17T22:50:01,434 DEBUG [RS:0;1a6e40b21a48:35999 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a6e40b21a48,35999,1731883800706' 2024-11-17T22:50:01,434 DEBUG [RS:0;1a6e40b21a48:35999 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-17T22:50:01,434 DEBUG 
[RS:0;1a6e40b21a48:35999 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-17T22:50:01,434 DEBUG [RS:0;1a6e40b21a48:35999 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-17T22:50:01,434 INFO [RS:0;1a6e40b21a48:35999 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-17T22:50:01,434 INFO [RS:0;1a6e40b21a48:35999 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-17T22:50:01,536 INFO [RS:0;1a6e40b21a48:35999 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a6e40b21a48%2C35999%2C1731883800706, suffix=, logDir=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/WALs/1a6e40b21a48,35999,1731883800706, archiveDir=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/oldWALs, maxLogs=32 2024-11-17T22:50:01,537 WARN [1a6e40b21a48:46683 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-17T22:50:01,537 INFO [RS:0;1a6e40b21a48:35999 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C35999%2C1731883800706.1731883801537 2024-11-17T22:50:01,546 INFO [RS:0;1a6e40b21a48:35999 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/WALs/1a6e40b21a48,35999,1731883800706/1a6e40b21a48%2C35999%2C1731883800706.1731883801537 2024-11-17T22:50:01,549 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:01,550 DEBUG [RS:0;1a6e40b21a48:35999 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37525:37525),(127.0.0.1/127.0.0.1:43219:43219)] 2024-11-17T22:50:01,787 DEBUG [1a6e40b21a48:46683 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-17T22:50:01,787 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1a6e40b21a48,35999,1731883800706 2024-11-17T22:50:01,789 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1a6e40b21a48,35999,1731883800706, state=OPENING 2024-11-17T22:50:01,790 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-17T22:50:01,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46683-0x1004fe0f4a40000, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:50:01,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35999-0x1004fe0f4a40001, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:50:01,792 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T22:50:01,792 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T22:50:01,792 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T22:50:01,792 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=1a6e40b21a48,35999,1731883800706}] 2024-11-17T22:50:01,986 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-17T22:50:01,989 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49535, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-17T22:50:01,994 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-17T22:50:01,995 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T22:50:01,997 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 
MB, prefix=1a6e40b21a48%2C35999%2C1731883800706.meta, suffix=.meta, logDir=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/WALs/1a6e40b21a48,35999,1731883800706, archiveDir=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/oldWALs, maxLogs=32 2024-11-17T22:50:01,998 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C35999%2C1731883800706.meta.1731883801997.meta 2024-11-17T22:50:02,009 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/WALs/1a6e40b21a48,35999,1731883800706/1a6e40b21a48%2C35999%2C1731883800706.meta.1731883801997.meta 2024-11-17T22:50:02,017 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37525:37525),(127.0.0.1/127.0.0.1:43219:43219)] 2024-11-17T22:50:02,025 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-17T22:50:02,026 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-17T22:50:02,026 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-17T22:50:02,026 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
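Annotation: the "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" entries above summarize how AbstractFSWAL sizes and rolls its log files: the roll size is the block size scaled by a multiplier, and maxLogs caps how many un-archived WALs may accumulate. Below is a minimal, illustrative Java sketch of reading those knobs from a client Configuration; the property names (hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier, hbase.regionserver.maxlogs) are the commonly documented ones, and the fallback values simply mirror what this log reports, so treat it as an assumption rather than a statement of this test's actual setup.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollSizing {
    public static void main(String[] args) {
        // Fallbacks mirror the values reported above, not necessarily shipped defaults.
        Configuration conf = HBaseConfiguration.create();
        long blockSize = conf.getLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        double multiplier = conf.getDouble("hbase.regionserver.logroll.multiplier", 0.5);
        int maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32);
        long rollSize = (long) (blockSize * multiplier);   // 256 MB * 0.5 = 128 MB, as logged
        System.out.printf("blocksize=%d rollsize=%d maxLogs=%d%n", blockSize, rollSize, maxLogs);
    }
}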
2024-11-17T22:50:02,026 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-17T22:50:02,026 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:50:02,026 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-17T22:50:02,026 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-17T22:50:02,029 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T22:50:02,030 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T22:50:02,031 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:50:02,031 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:50:02,031 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T22:50:02,032 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T22:50:02,033 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:50:02,034 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:50:02,034 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T22:50:02,035 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T22:50:02,035 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:50:02,036 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:50:02,036 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T22:50:02,037 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T22:50:02,037 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:50:02,038 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
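Annotation: each "CompactionConfiguration(183)" entry above prints the effective compaction settings for one column family of hbase:meta (minCompactSize, min/max files to compact, ratios, major-compaction period and jitter, and the exploring/tiered policy classes). As rough orientation only, those figures map onto well-known configuration keys; the sketch below sets a few of them on a Configuration, with values copied from the log output rather than from any authoritative defaults table.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSettingsSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // minFilesToCompact / maxFilesToCompact in the log
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // ratio / off-peak ratio in the log
        conf.setDouble("hbase.hstore.compaction.ratio", 1.2);
        conf.setDouble("hbase.hstore.compaction.ratio.offpeak", 5.0);
        // major compaction period (ms) and jitter in the log
        conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);
        conf.setDouble("hbase.hregion.majorcompaction.jitter", 0.5);
    }
}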
2024-11-17T22:50:02,038 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T22:50:02,039 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/hbase/meta/1588230740 2024-11-17T22:50:02,040 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/hbase/meta/1588230740 2024-11-17T22:50:02,043 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T22:50:02,043 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T22:50:02,045 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-17T22:50:02,050 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T22:50:02,052 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=862214, jitterRate=0.0963621586561203}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T22:50:02,052 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-17T22:50:02,053 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731883802026Writing region info on filesystem at 1731883802026Initializing all the Stores at 1731883802028 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883802028Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883802029 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883802029Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883802029Cleaning up temporary data from old regions at 1731883802043 (+14 ms)Running coprocessor post-open hooks at 1731883802052 (+9 ms)Region opened successfully at 1731883802053 (+1 ms) 2024-11-17T22:50:02,057 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731883801986 2024-11-17T22:50:02,060 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-17T22:50:02,060 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-17T22:50:02,069 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=1a6e40b21a48,35999,1731883800706 2024-11-17T22:50:02,070 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1a6e40b21a48,35999,1731883800706, state=OPEN 2024-11-17T22:50:02,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46683-0x1004fe0f4a40000, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T22:50:02,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35999-0x1004fe0f4a40001, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T22:50:02,072 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=1a6e40b21a48,35999,1731883800706 2024-11-17T22:50:02,072 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T22:50:02,072 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T22:50:02,076 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-17T22:50:02,076 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=1a6e40b21a48,35999,1731883800706 in 280 msec 2024-11-17T22:50:02,080 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-17T22:50:02,080 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 692 msec 2024-11-17T22:50:02,081 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T22:50:02,081 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-17T22:50:02,082 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T22:50:02,083 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a6e40b21a48,35999,1731883800706, seqNum=-1] 2024-11-17T22:50:02,083 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T22:50:02,084 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33063, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T22:50:02,092 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 776 msec 2024-11-17T22:50:02,092 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731883802092, completionTime=-1 2024-11-17T22:50:02,092 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-17T22:50:02,092 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-17T22:50:02,095 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-17T22:50:02,095 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731883862095 2024-11-17T22:50:02,095 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731883922095 2024-11-17T22:50:02,095 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-17T22:50:02,095 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,46683,1731883800648-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T22:50:02,095 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,46683,1731883800648-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:50:02,095 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,46683,1731883800648-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:50:02,095 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-1a6e40b21a48:46683, period=300000, unit=MILLISECONDS is enabled. 
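Annotation: the recurring "RecoverLeaseFSUtils(258): Failed invocation ... Caused by: java.io.IOException: Filesystem closed" warnings (more follow immediately below) all point at WALs under hdfs://localhost:39901 rather than the current cluster's hdfs://localhost:46027, so they appear to come from Close-WAL-Writer threads still trying to recover leases on files of an earlier mini-cluster whose DFSClient has already been shut down; the InvocationTargetException wrapper appears because RecoverLeaseFSUtils invokes isFileClosed reflectively. For reference, a minimal sketch of the two plain HDFS calls involved (this is not HBase's actual retry loop, just the underlying API):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
    // recoverLease() asks the NameNode to reclaim the writer's lease;
    // isFileClosed() reports whether the file is fully closed. Both throw
    // "java.io.IOException: Filesystem closed" once the DFSClient behind the
    // FileSystem instance has been shut down, which is what the warnings show.
    static boolean recover(Configuration conf, String walUri) throws Exception {
        Path wal = new Path(walUri);
        FileSystem fs = FileSystem.get(new URI(walUri), conf);
        if (!(fs instanceof DistributedFileSystem)) {
            return true; // nothing to recover on a local filesystem
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        return dfs.recoverLease(wal) || dfs.isFileClosed(wal);
    }
}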
2024-11-17T22:50:02,095 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-17T22:50:02,097 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-17T22:50:02,098 DEBUG [master/1a6e40b21a48:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-17T22:50:02,102 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.345sec 2024-11-17T22:50:02,102 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-17T22:50:02,102 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-17T22:50:02,102 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-17T22:50:02,102 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-17T22:50:02,102 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-17T22:50:02,102 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,46683,1731883800648-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T22:50:02,102 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,46683,1731883800648-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-17T22:50:02,105 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-17T22:50:02,105 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-17T22:50:02,105 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,46683,1731883800648-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:50:02,133 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:02,133 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:02,152 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@208716aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T22:50:02,153 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 1a6e40b21a48,46683,-1 for getting cluster id 2024-11-17T22:50:02,153 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-17T22:50:02,155 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '06c9ab33-52f1-4273-acf6-3c3144450d26' 2024-11-17T22:50:02,155 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-17T22:50:02,155 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "06c9ab33-52f1-4273-acf6-3c3144450d26" 2024-11-17T22:50:02,156 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c51d21, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T22:50:02,156 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a6e40b21a48,46683,-1] 2024-11-17T22:50:02,156 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-17T22:50:02,156 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:50:02,158 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39658, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-17T22:50:02,159 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7181dda1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T22:50:02,159 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T22:50:02,160 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a6e40b21a48,35999,1731883800706, seqNum=-1] 2024-11-17T22:50:02,161 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T22:50:02,162 INFO 
[MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42766, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T22:50:02,164 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=1a6e40b21a48,46683,1731883800648 2024-11-17T22:50:02,165 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:50:02,167 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-17T22:50:02,168 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-17T22:50:02,169 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 1a6e40b21a48,46683,1731883800648 2024-11-17T22:50:02,169 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7be5d4b3 2024-11-17T22:50:02,169 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-17T22:50:02,170 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39660, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-17T22:50:02,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46683 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-17T22:50:02,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46683 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
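Annotation: the two TableDescriptorChecker warnings above fire because the effective MAX_FILESIZE (from the descriptor or hbase.hregion.max.filesize) is 786432 bytes (768 KB) and the effective MEMSTORE_FLUSHSIZE is 8192 bytes (8 KB), far below anything sensible in production; for a log-rolling test, keeping these tiny is a natural way to force frequent flushes, rolls, and splits. A hedged sketch of building an equivalent descriptor with the public client API follows (the table and family names simply match the create request logged below; this is an illustration, not the test's actual code):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTinyTable {
    public static void main(String[] args) throws Exception {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
            .setMaxFileSize(786432L)       // the value behind the MAX_FILESIZE warning
            .setMemStoreFlushSize(8192L)   // the value behind the MEMSTORE_FLUSHSIZE warning
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .build();
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            admin.createTable(td);
        }
    }
}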
2024-11-17T22:50:02,171 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46683 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T22:50:02,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46683 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-17T22:50:02,174 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-17T22:50:02,174 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:50:02,174 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46683 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-17T22:50:02,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46683 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-17T22:50:02,175 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-17T22:50:02,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741835_1011 (size=381) 2024-11-17T22:50:02,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741835_1011 (size=381) 2024-11-17T22:50:02,189 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 1f4b8723a07f669c388b5143f2a00156, NAME => 'TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e 2024-11-17T22:50:02,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741836_1012 (size=64) 2024-11-17T22:50:02,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741836_1012 (size=64) 2024-11-17T22:50:02,196 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:50:02,196 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 1f4b8723a07f669c388b5143f2a00156, disabling compactions & flushes 2024-11-17T22:50:02,196 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156. 2024-11-17T22:50:02,196 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156. 2024-11-17T22:50:02,196 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156. after waiting 0 ms 2024-11-17T22:50:02,196 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156. 2024-11-17T22:50:02,196 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156. 2024-11-17T22:50:02,196 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 1f4b8723a07f669c388b5143f2a00156: Waiting for close lock at 1731883802196Disabling compacts and flushes for region at 1731883802196Disabling writes for close at 1731883802196Writing region close event to WAL at 1731883802196Closed at 1731883802196 2024-11-17T22:50:02,198 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-17T22:50:02,198 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731883802198"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731883802198"}]},"ts":"1731883802198"} 2024-11-17T22:50:02,200 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
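Annotation: at this point the new region's row has been written to hbase:meta ("Added 1 regions to meta" above); once the assignment below completes, a client can resolve the region's location from meta. Purely as an illustration of that client-side view (not something this test necessarily does), a small RegionLocator sketch:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class LocateRegions {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(
                 TableName.valueOf("TestLogRolling-testLogRolling"))) {
            for (HRegionLocation loc : locator.getAllRegionLocations()) {
                // e.g. 1f4b8723a07f669c388b5143f2a00156 @ 1a6e40b21a48,35999,1731883800706
                System.out.println(loc.getRegion().getEncodedName() + " @ " + loc.getServerName());
            }
        }
    }
}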
2024-11-17T22:50:02,201 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-17T22:50:02,201 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731883802201"}]},"ts":"1731883802201"} 2024-11-17T22:50:02,203 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-17T22:50:02,203 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1f4b8723a07f669c388b5143f2a00156, ASSIGN}] 2024-11-17T22:50:02,205 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1f4b8723a07f669c388b5143f2a00156, ASSIGN 2024-11-17T22:50:02,206 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1f4b8723a07f669c388b5143f2a00156, ASSIGN; state=OFFLINE, location=1a6e40b21a48,35999,1731883800706; forceNewPlan=false, retain=false 2024-11-17T22:50:02,357 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1f4b8723a07f669c388b5143f2a00156, regionState=OPENING, regionLocation=1a6e40b21a48,35999,1731883800706 2024-11-17T22:50:02,360 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1f4b8723a07f669c388b5143f2a00156, ASSIGN because future has completed 2024-11-17T22:50:02,360 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1f4b8723a07f669c388b5143f2a00156, server=1a6e40b21a48,35999,1731883800706}] 2024-11-17T22:50:02,517 INFO [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156. 
2024-11-17T22:50:02,518 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 1f4b8723a07f669c388b5143f2a00156, NAME => 'TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156.', STARTKEY => '', ENDKEY => ''} 2024-11-17T22:50:02,518 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 1f4b8723a07f669c388b5143f2a00156 2024-11-17T22:50:02,518 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:50:02,518 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 1f4b8723a07f669c388b5143f2a00156 2024-11-17T22:50:02,518 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 1f4b8723a07f669c388b5143f2a00156 2024-11-17T22:50:02,523 INFO [StoreOpener-1f4b8723a07f669c388b5143f2a00156-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1f4b8723a07f669c388b5143f2a00156 2024-11-17T22:50:02,525 INFO [StoreOpener-1f4b8723a07f669c388b5143f2a00156-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1f4b8723a07f669c388b5143f2a00156 columnFamilyName info 2024-11-17T22:50:02,525 DEBUG [StoreOpener-1f4b8723a07f669c388b5143f2a00156-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:50:02,526 INFO [StoreOpener-1f4b8723a07f669c388b5143f2a00156-1 {}] regionserver.HStore(327): Store=1f4b8723a07f669c388b5143f2a00156/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T22:50:02,526 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 1f4b8723a07f669c388b5143f2a00156 2024-11-17T22:50:02,527 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156 2024-11-17T22:50:02,527 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156 2024-11-17T22:50:02,528 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 1f4b8723a07f669c388b5143f2a00156 2024-11-17T22:50:02,528 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 1f4b8723a07f669c388b5143f2a00156 2024-11-17T22:50:02,530 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 1f4b8723a07f669c388b5143f2a00156 2024-11-17T22:50:02,532 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T22:50:02,533 INFO [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 1f4b8723a07f669c388b5143f2a00156; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=817495, jitterRate=0.039499834179878235}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-17T22:50:02,533 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1f4b8723a07f669c388b5143f2a00156 2024-11-17T22:50:02,534 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 1f4b8723a07f669c388b5143f2a00156: Running coprocessor pre-open hook at 1731883802518Writing region info on filesystem at 1731883802518Initializing all the Stores at 1731883802519 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883802519Cleaning up temporary data from old regions at 1731883802528 (+9 ms)Running coprocessor post-open hooks at 1731883802533 (+5 ms)Region opened successfully at 1731883802534 (+1 ms) 2024-11-17T22:50:02,535 INFO [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156., pid=6, masterSystemTime=1731883802513 2024-11-17T22:50:02,538 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156. 
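Annotation: the "Opened 1f4b8723a07f669c388b5143f2a00156; ... SteppingSplitPolicy ... desiredMaxFileSize=817495, jitterRate=0.0394..." entry above shows the split-policy chain the region ends up with; the jittered desiredMaxFileSize is consistent with maxFileSize * (1 + jitterRate), i.e. 786432 * 1.0394998 ~= 817495, so the tiny MAX_FILESIZE drives early splits. The property name in the sketch below is the commonly documented cluster-wide switch for the default split policy and is an assumption about configuration style, not something read from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SplitPolicySketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Cluster-wide default split policy (tables can also override it in their descriptor).
        conf.set("hbase.regionserver.region.split.policy",
            "org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy");

        // Jitter arithmetic matching the figures logged above.
        long maxFileSize = 786432L;
        double jitterRate = 0.039499834179878235;
        System.out.println((long) (maxFileSize * (1 + jitterRate)));   // ~817495
    }
}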
2024-11-17T22:50:02,538 INFO [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156. 2024-11-17T22:50:02,539 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1f4b8723a07f669c388b5143f2a00156, regionState=OPEN, openSeqNum=2, regionLocation=1a6e40b21a48,35999,1731883800706 2024-11-17T22:50:02,541 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1f4b8723a07f669c388b5143f2a00156, server=1a6e40b21a48,35999,1731883800706 because future has completed 2024-11-17T22:50:02,545 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-17T22:50:02,545 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 1f4b8723a07f669c388b5143f2a00156, server=1a6e40b21a48,35999,1731883800706 in 183 msec 2024-11-17T22:50:02,547 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-17T22:50:02,547 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1f4b8723a07f669c388b5143f2a00156, ASSIGN in 342 msec 2024-11-17T22:50:02,548 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-17T22:50:02,548 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731883802548"}]},"ts":"1731883802548"} 2024-11-17T22:50:02,549 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:02,551 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-17T22:50:02,552 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-17T22:50:02,554 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 381 msec 2024-11-17T22:50:03,133 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:03,134 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:03,550 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:04,134 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:04,134 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:04,435 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:04,435 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:04,435 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:04,435 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:04,436 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:04,436 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:04,436 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:04,437 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:04,458 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:04,458 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:04,459 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:04,459 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:04,459 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:04,459 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:04,463 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:04,463 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:04,463 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:04,465 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:04,550 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:04,969 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-17T22:50:04,970 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:04,971 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:04,971 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:04,971 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:04,971 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:04,971 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:04,972 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:04,972 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:05,003 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:05,004 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:05,004 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:05,004 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:05,004 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:05,005 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:05,010 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:05,010 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:05,010 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:05,014 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:05,021 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-17T22:50:05,021 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-17T22:50:05,022 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-17T22:50:05,135 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:05,135 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:05,551 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:06,135 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:06,135 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:06,552 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:07,136 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:07,136 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:07,384 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-17T22:50:07,385 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-17T22:50:07,552 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:08,137 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:08,137 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:08,553 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:09,138 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:09,138 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:09,554 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:10,139 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:10,139 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:10,525 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-17T22:50:10,526 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:10,527 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:10,527 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:10,528 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:10,528 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:10,528 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:10,530 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:10,530 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:10,555 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:10,556 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:10,556 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:10,556 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:10,556 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:10,557 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:10,557 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:10,560 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:10,561 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:10,561 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:10,563 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T22:50:11,139 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:11,139 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:11,555 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:12,140 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:12,140 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:12,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46683 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-17T22:50:12,252 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-17T22:50:12,252 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-17T22:50:12,255 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-17T22:50:12,255 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156. 
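The repeated warnings above come from the WAL close path retrying lease recovery against a DFS client that has already been shut down: RecoverLeaseFSUtils invokes isFileClosed reflectively, so the underlying "Filesystem closed" IOException only surfaces as the cause of an InvocationTargetException, whose own message is null, which is exactly what each trace prints. A minimal, self-contained sketch of that wrapping behaviour, using a hypothetical ClosedFs stand-in rather than the real DistributedFileSystem:

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Sketch only: ClosedFs is a made-up stand-in for the already-closed HDFS client.
public class ReflectiveLeaseCheckSketch {

  static class ClosedFs {
    public boolean isFileClosed(String path) throws IOException {
      throw new IOException("Filesystem closed"); // what the closed client reports
    }
  }

  public static void main(String[] args) throws Exception {
    Method m = ClosedFs.class.getMethod("isFileClosed", String.class);
    try {
      m.invoke(new ClosedFs(), "/some/wal/file");
    } catch (InvocationTargetException e) {
      // The wrapper carries no message of its own, hence "InvocationTargetException: null"
      // in the log; the real error is only visible as the wrapped cause.
      System.out.println("wrapper: " + e);            // java.lang.reflect.InvocationTargetException
      System.out.println("cause:   " + e.getCause()); // java.io.IOException: Filesystem closed
    }
  }
}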
2024-11-17T22:50:12,259 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156., hostname=1a6e40b21a48,35999,1731883800706, seqNum=2] 2024-11-17T22:50:12,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35999 {}] regionserver.HRegion(8855): Flush requested on 1f4b8723a07f669c388b5143f2a00156 2024-11-17T22:50:12,280 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1f4b8723a07f669c388b5143f2a00156 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-17T22:50:12,300 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp/info/227478b583374433b3acc436a0966d86 is 1080, key is row0001/info:/1731883812260/Put/seqid=0 2024-11-17T22:50:12,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741837_1013 (size=12509) 2024-11-17T22:50:12,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741837_1013 (size=12509) 2024-11-17T22:50:12,307 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp/info/227478b583374433b3acc436a0966d86 2024-11-17T22:50:12,314 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp/info/227478b583374433b3acc436a0966d86 as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/227478b583374433b3acc436a0966d86 2024-11-17T22:50:12,320 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/227478b583374433b3acc436a0966d86, entries=7, sequenceid=11, filesize=12.2 K 2024-11-17T22:50:12,321 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=17.86 KB/18292 for 1f4b8723a07f669c388b5143f2a00156 in 41ms, sequenceid=11, compaction requested=false 2024-11-17T22:50:12,321 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1f4b8723a07f669c388b5143f2a00156: 2024-11-17T22:50:12,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35999 {}] regionserver.HRegion(8855): Flush requested on 1f4b8723a07f669c388b5143f2a00156 2024-11-17T22:50:12,324 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1f4b8723a07f669c388b5143f2a00156 1/1 column families, dataSize=19.96 KB heapSize=21.63 KB 2024-11-17T22:50:12,330 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp/info/ff9ec9aac1684bd7a7cad47a680d97d5 is 1080, key is row0008/info:/1731883812282/Put/seqid=0 2024-11-17T22:50:12,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741838_1014 (size=25453) 2024-11-17T22:50:12,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741838_1014 (size=25453) 2024-11-17T22:50:12,338 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=19.96 KB at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp/info/ff9ec9aac1684bd7a7cad47a680d97d5 2024-11-17T22:50:12,347 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp/info/ff9ec9aac1684bd7a7cad47a680d97d5 as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/ff9ec9aac1684bd7a7cad47a680d97d5 2024-11-17T22:50:12,354 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/ff9ec9aac1684bd7a7cad47a680d97d5, entries=19, sequenceid=33, filesize=24.9 K 2024-11-17T22:50:12,356 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~19.96 KB/20444, heapSize ~21.61 KB/22128, currentSize=6.30 KB/6456 for 1f4b8723a07f669c388b5143f2a00156 in 32ms, sequenceid=33, compaction requested=false 2024-11-17T22:50:12,356 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1f4b8723a07f669c388b5143f2a00156: 2024-11-17T22:50:12,356 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=37.1 K, sizeToCheck=16.0 K 2024-11-17T22:50:12,356 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T22:50:12,356 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/ff9ec9aac1684bd7a7cad47a680d97d5 because midkey is the same as first or last row 2024-11-17T22:50:12,556 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:13,141 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:13,141 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:13,556 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:14,141 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:14,141 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:50:14,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35999 {}] regionserver.HRegion(8855): Flush requested on 1f4b8723a07f669c388b5143f2a00156 2024-11-17T22:50:14,346 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1f4b8723a07f669c388b5143f2a00156 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-17T22:50:14,354 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp/info/e52cb797151e40afab07686683800613 is 1080, key is row0027/info:/1731883812325/Put/seqid=0 2024-11-17T22:50:14,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741839_1015 (size=12509) 2024-11-17T22:50:14,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741839_1015 (size=12509) 2024-11-17T22:50:14,361 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp/info/e52cb797151e40afab07686683800613 2024-11-17T22:50:14,368 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp/info/e52cb797151e40afab07686683800613 as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/e52cb797151e40afab07686683800613 2024-11-17T22:50:14,374 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/e52cb797151e40afab07686683800613, entries=7, sequenceid=43, filesize=12.2 K 2024-11-17T22:50:14,375 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 1f4b8723a07f669c388b5143f2a00156 in 29ms, sequenceid=43, compaction requested=true 2024-11-17T22:50:14,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1f4b8723a07f669c388b5143f2a00156: 2024-11-17T22:50:14,375 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=49.3 K, sizeToCheck=16.0 K 2024-11-17T22:50:14,375 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T22:50:14,375 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/ff9ec9aac1684bd7a7cad47a680d97d5 because midkey is the same as first or last row 2024-11-17T22:50:14,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35999 {}] regionserver.HRegion(8855): Flush requested on 1f4b8723a07f669c388b5143f2a00156 2024-11-17T22:50:14,376 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1f4b8723a07f669c388b5143f2a00156:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T22:50:14,376 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T22:50:14,376 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T22:50:14,376 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1f4b8723a07f669c388b5143f2a00156 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-17T22:50:14,377 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 50471 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T22:50:14,377 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HStore(1541): 1f4b8723a07f669c388b5143f2a00156/info is initiating minor compaction (all files) 2024-11-17T22:50:14,378 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 1f4b8723a07f669c388b5143f2a00156/info in TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156. 2024-11-17T22:50:14,378 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/227478b583374433b3acc436a0966d86, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/ff9ec9aac1684bd7a7cad47a680d97d5, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/e52cb797151e40afab07686683800613] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp, totalSize=49.3 K 2024-11-17T22:50:14,378 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.Compactor(225): Compacting 227478b583374433b3acc436a0966d86, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731883812260 2024-11-17T22:50:14,378 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.Compactor(225): Compacting ff9ec9aac1684bd7a7cad47a680d97d5, keycount=19, bloomtype=ROW, size=24.9 K, encoding=NONE, compression=NONE, seqNum=33, earliestPutTs=1731883812282 2024-11-17T22:50:14,379 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.Compactor(225): Compacting e52cb797151e40afab07686683800613, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1731883812325 2024-11-17T22:50:14,381 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp/info/c4c80e0546a6429eaafcbe884a5384dd is 1080, key is row0034/info:/1731883814349/Put/seqid=0 
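The size figures in the compaction selection above line up with the store-file sizes reported by the block manager earlier: 12509 + 25453 + 12509 bytes = 50471 bytes, i.e. the "3 files of size 50471", which printed in KiB gives the "totalSize=49.3 K". A tiny illustrative check of that arithmetic (not HBase code):

// Sketch only: sums the three HFile sizes logged above and prints them the way the
// compaction selection reports them (bytes, then KiB to one decimal place).
public class CompactionSizeSketch {
  public static void main(String[] args) {
    long[] hfileSizes = {12_509L, 25_453L, 12_509L}; // bytes, from the addStoredBlock lines
    long total = 0;
    for (long s : hfileSizes) {
      total += s;
    }
    System.out.printf("selected %d files of size %d (%.1f K)%n",
        hfileSizes.length, total, total / 1024.0);
    // Expected output: selected 3 files of size 50471 (49.3 K)
  }
}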
2024-11-17T22:50:14,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741840_1016 (size=16817) 2024-11-17T22:50:14,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741840_1016 (size=16817) 2024-11-17T22:50:14,387 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=57 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp/info/c4c80e0546a6429eaafcbe884a5384dd 2024-11-17T22:50:14,394 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp/info/c4c80e0546a6429eaafcbe884a5384dd as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/c4c80e0546a6429eaafcbe884a5384dd 2024-11-17T22:50:14,394 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1f4b8723a07f669c388b5143f2a00156#info#compaction#58 average throughput is 16.93 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T22:50:14,394 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp/info/f5b02f39e717456fa112c6b0cabc912e is 1080, key is row0001/info:/1731883812260/Put/seqid=0 2024-11-17T22:50:14,400 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/c4c80e0546a6429eaafcbe884a5384dd, entries=11, sequenceid=57, filesize=16.4 K 2024-11-17T22:50:14,401 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=10.51 KB/10760 for 1f4b8723a07f669c388b5143f2a00156 in 25ms, sequenceid=57, compaction requested=false 2024-11-17T22:50:14,401 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1f4b8723a07f669c388b5143f2a00156: 2024-11-17T22:50:14,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35999 {}] regionserver.HRegion(8855): Flush requested on 1f4b8723a07f669c388b5143f2a00156 2024-11-17T22:50:14,401 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.7 K, sizeToCheck=16.0 K 2024-11-17T22:50:14,401 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T22:50:14,401 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/ff9ec9aac1684bd7a7cad47a680d97d5 because midkey is the same as first or last row 2024-11-17T22:50:14,401 INFO 
[MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1f4b8723a07f669c388b5143f2a00156 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-17T22:50:14,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741841_1017 (size=40670) 2024-11-17T22:50:14,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741841_1017 (size=40670) 2024-11-17T22:50:14,409 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp/info/33149c266c8541278966700bc4bf35dd is 1080, key is row0045/info:/1731883814377/Put/seqid=0 2024-11-17T22:50:14,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741842_1018 (size=16817) 2024-11-17T22:50:14,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741842_1018 (size=16817) 2024-11-17T22:50:14,416 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp/info/f5b02f39e717456fa112c6b0cabc912e as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/f5b02f39e717456fa112c6b0cabc912e 2024-11-17T22:50:14,417 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=71 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp/info/33149c266c8541278966700bc4bf35dd 2024-11-17T22:50:14,423 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 1f4b8723a07f669c388b5143f2a00156/info of 1f4b8723a07f669c388b5143f2a00156 into f5b02f39e717456fa112c6b0cabc912e(size=39.7 K), total size for store is 56.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
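The flush records above follow a write-to-temp-then-commit pattern: the flusher first writes the new HFile under the region's .tmp directory and only afterwards moves it into the info store directory, so readers never observe a half-written file. A minimal sketch of that pattern with plain java.nio (the directory layout and file name are illustrative, not the HRegionFileSystem API):

import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Sketch only: demonstrates the .tmp-then-commit move, not actual HBase code.
public class TmpThenCommitSketch {
  public static void main(String[] args) throws Exception {
    // Keep both directories under one base so the move stays on the same filesystem.
    Path base = Files.createTempDirectory("flush-sketch");
    Path tmpDir = Files.createDirectory(base.resolve(".tmp"));
    Path storeDir = Files.createDirectory(base.resolve("info"));

    // Stand-in for the flushed HFile, written under .tmp first (name from the log, contents fake).
    Path tmpFile = tmpDir.resolve("227478b583374433b3acc436a0966d86");
    Files.write(tmpFile, new byte[] {1, 2, 3});

    // The "Committing .tmp/<file> as info/<file>" step: move into the store directory.
    Path committed = Files.move(tmpFile, storeDir.resolve(tmpFile.getFileName()),
        StandardCopyOption.ATOMIC_MOVE);
    System.out.println("committed: " + committed);
  }
}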
2024-11-17T22:50:14,423 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp/info/33149c266c8541278966700bc4bf35dd as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/33149c266c8541278966700bc4bf35dd 2024-11-17T22:50:14,423 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 1f4b8723a07f669c388b5143f2a00156: 2024-11-17T22:50:14,423 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156., storeName=1f4b8723a07f669c388b5143f2a00156/info, priority=13, startTime=1731883814375; duration=0sec 2024-11-17T22:50:14,423 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=56.1 K, sizeToCheck=16.0 K 2024-11-17T22:50:14,423 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T22:50:14,423 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/f5b02f39e717456fa112c6b0cabc912e because midkey is the same as first or last row 2024-11-17T22:50:14,423 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=56.1 K, sizeToCheck=16.0 K 2024-11-17T22:50:14,423 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T22:50:14,423 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/f5b02f39e717456fa112c6b0cabc912e because midkey is the same as first or last row 2024-11-17T22:50:14,423 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=56.1 K, sizeToCheck=16.0 K 2024-11-17T22:50:14,423 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T22:50:14,423 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/f5b02f39e717456fa112c6b0cabc912e because midkey is the same as first or last row 2024-11-17T22:50:14,423 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T22:50:14,423 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove 
under compaction mark for store: 1f4b8723a07f669c388b5143f2a00156:info 2024-11-17T22:50:14,428 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/33149c266c8541278966700bc4bf35dd, entries=11, sequenceid=71, filesize=16.4 K 2024-11-17T22:50:14,429 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=9.46 KB/9684 for 1f4b8723a07f669c388b5143f2a00156 in 28ms, sequenceid=71, compaction requested=true 2024-11-17T22:50:14,429 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1f4b8723a07f669c388b5143f2a00156: 2024-11-17T22:50:14,429 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K 2024-11-17T22:50:14,429 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T22:50:14,429 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/f5b02f39e717456fa112c6b0cabc912e because midkey is the same as first or last row 2024-11-17T22:50:14,430 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T22:50:14,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1f4b8723a07f669c388b5143f2a00156:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T22:50:14,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T22:50:14,431 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 74304 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T22:50:14,431 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HStore(1541): 1f4b8723a07f669c388b5143f2a00156/info is initiating minor compaction (all files) 2024-11-17T22:50:14,432 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 1f4b8723a07f669c388b5143f2a00156/info in TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156. 
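The split-policy lines above record a two-step decision: the store size (sumSize) exceeds the 16.0 K check threshold, yet the split is still rejected because the candidate midkey equals the region's first or last row. A rough sketch of that logic under those assumptions (the last-row value and exact byte counts are illustrative, and this is not the actual HBase RegionSplitPolicy code):

import java.util.Arrays;

// Sketch only: size check plus midkey sanity check, mirroring the log messages above.
public class SplitDecisionSketch {

  static boolean shouldSplit(long sumSizeBytes, long sizeToCheckBytes,
                             byte[] midKey, byte[] firstKey, byte[] lastKey) {
    if (sumSizeBytes <= sizeToCheckBytes) {
      return false; // region not big enough yet
    }
    // "cannot split ... because midkey is the same as first or last row"
    return !Arrays.equals(midKey, firstKey) && !Arrays.equals(midKey, lastKey);
  }

  public static void main(String[] args) {
    long sumSize = 57_487;     // roughly the logged 56.1 K store size, in bytes
    long sizeToCheck = 16_384; // the logged 16.0 K threshold
    byte[] first = "row0001".getBytes();
    byte[] last = "row0055".getBytes(); // illustrative; the real last row is not in the log

    // Midkey collides with the first row -> no split, even though the size check passes.
    System.out.println(shouldSplit(sumSize, sizeToCheck, first, first, last));                // false
    // A midkey strictly inside the key range would allow the split.
    System.out.println(shouldSplit(sumSize, sizeToCheck, "row0020".getBytes(), first, last)); // true
  }
}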
2024-11-17T22:50:14,432 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/f5b02f39e717456fa112c6b0cabc912e, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/c4c80e0546a6429eaafcbe884a5384dd, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/33149c266c8541278966700bc4bf35dd] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp, totalSize=72.6 K 2024-11-17T22:50:14,432 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.Compactor(225): Compacting f5b02f39e717456fa112c6b0cabc912e, keycount=33, bloomtype=ROW, size=39.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1731883812260 2024-11-17T22:50:14,433 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.Compactor(225): Compacting c4c80e0546a6429eaafcbe884a5384dd, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=57, earliestPutTs=1731883814349 2024-11-17T22:50:14,433 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.Compactor(225): Compacting 33149c266c8541278966700bc4bf35dd, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=71, earliestPutTs=1731883814377 2024-11-17T22:50:14,445 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1f4b8723a07f669c388b5143f2a00156#info#compaction#60 average throughput is 28.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T22:50:14,445 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp/info/c1718b0849a4472f9368c7f575bf3b29 is 1080, key is row0001/info:/1731883812260/Put/seqid=0 2024-11-17T22:50:14,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741843_1019 (size=64535) 2024-11-17T22:50:14,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741843_1019 (size=64535) 2024-11-17T22:50:14,456 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp/info/c1718b0849a4472f9368c7f575bf3b29 as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/c1718b0849a4472f9368c7f575bf3b29 2024-11-17T22:50:14,462 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 1f4b8723a07f669c388b5143f2a00156/info of 1f4b8723a07f669c388b5143f2a00156 into c1718b0849a4472f9368c7f575bf3b29(size=63.0 K), total size for store is 63.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T22:50:14,462 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 1f4b8723a07f669c388b5143f2a00156: 2024-11-17T22:50:14,462 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156., storeName=1f4b8723a07f669c388b5143f2a00156/info, priority=13, startTime=1731883814429; duration=0sec 2024-11-17T22:50:14,462 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.0 K, sizeToCheck=16.0 K 2024-11-17T22:50:14,462 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T22:50:14,462 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/c1718b0849a4472f9368c7f575bf3b29 because midkey is the same as first or last row 2024-11-17T22:50:14,462 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.0 K, sizeToCheck=16.0 K 2024-11-17T22:50:14,462 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T22:50:14,462 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/c1718b0849a4472f9368c7f575bf3b29 because midkey is the same as first or last row 2024-11-17T22:50:14,462 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.0 K, sizeToCheck=16.0 K 2024-11-17T22:50:14,462 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T22:50:14,462 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/c1718b0849a4472f9368c7f575bf3b29 because midkey is the same as first or last row 2024-11-17T22:50:14,463 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T22:50:14,463 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1f4b8723a07f669c388b5143f2a00156:info 2024-11-17T22:50:14,557 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:15,142 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:15,142 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:15,557 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:16,143 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:16,143 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:16,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35999 {}] regionserver.HRegion(8855): Flush requested on 1f4b8723a07f669c388b5143f2a00156 2024-11-17T22:50:16,428 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1f4b8723a07f669c388b5143f2a00156 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-17T22:50:16,434 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp/info/71482c7542e9400b8c52537bef2c3014 is 1080, key is row0056/info:/1731883814402/Put/seqid=0 2024-11-17T22:50:16,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741844_1020 (size=15740) 2024-11-17T22:50:16,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741844_1020 (size=15740) 2024-11-17T22:50:16,441 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=86 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp/info/71482c7542e9400b8c52537bef2c3014 2024-11-17T22:50:16,447 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp/info/71482c7542e9400b8c52537bef2c3014 as 
hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/71482c7542e9400b8c52537bef2c3014 2024-11-17T22:50:16,452 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/71482c7542e9400b8c52537bef2c3014, entries=10, sequenceid=86, filesize=15.4 K 2024-11-17T22:50:16,453 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=11.56 KB/11836 for 1f4b8723a07f669c388b5143f2a00156 in 26ms, sequenceid=86, compaction requested=false 2024-11-17T22:50:16,453 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1f4b8723a07f669c388b5143f2a00156: 2024-11-17T22:50:16,453 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=78.4 K, sizeToCheck=16.0 K 2024-11-17T22:50:16,453 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T22:50:16,453 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/c1718b0849a4472f9368c7f575bf3b29 because midkey is the same as first or last row 2024-11-17T22:50:16,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35999 {}] regionserver.HRegion(8855): Flush requested on 1f4b8723a07f669c388b5143f2a00156 2024-11-17T22:50:16,454 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1f4b8723a07f669c388b5143f2a00156 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-17T22:50:16,458 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp/info/67de518b7100411e9db68911fef6277d is 1080, key is row0066/info:/1731883816429/Put/seqid=0 2024-11-17T22:50:16,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741845_1021 (size=17894) 2024-11-17T22:50:16,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741845_1021 (size=17894) 2024-11-17T22:50:16,464 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=101 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp/info/67de518b7100411e9db68911fef6277d 2024-11-17T22:50:16,470 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp/info/67de518b7100411e9db68911fef6277d as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/67de518b7100411e9db68911fef6277d 
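
The MemStoreFlusher records around this point follow the usual flush shape: write the snapshot to a temporary HFile under .tmp, then commit it by moving it into the store directory, after which a compaction may be requested. A rough standalone sketch of that write-then-rename pattern using plain java.nio follows; the paths and payload are placeholders, not the real HStore layout logic.

    import java.io.IOException;
    import java.nio.file.*;

    // Rough sketch of the flush commit pattern seen above: write to .tmp, then move the
    // finished file into the live store directory so readers only ever see complete files.
    public class FlushCommitSketch {

        static Path flush(Path storeDir, String fileName, byte[] snapshot) throws IOException {
            Path tmpDir = storeDir.resolve(".tmp");
            Files.createDirectories(tmpDir);

            // 1. Write the memstore snapshot to a temporary file.
            Path tmpFile = tmpDir.resolve(fileName);
            Files.write(tmpFile, snapshot);

            // 2. Commit: move it into the store directory in a single step.
            Path committed = storeDir.resolve(fileName);
            return Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);
        }

        public static void main(String[] args) throws IOException {
            Path storeDir = Files.createTempDirectory("info-store");
            Path file = flush(storeDir, "example-hfile", new byte[]{1, 2, 3});
            System.out.println("Added " + file + ", filesize=" + Files.size(file) + " bytes");
        }
    }
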
2024-11-17T22:50:16,475 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/67de518b7100411e9db68911fef6277d, entries=12, sequenceid=101, filesize=17.5 K 2024-11-17T22:50:16,476 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=11.56 KB/11836 for 1f4b8723a07f669c388b5143f2a00156 in 22ms, sequenceid=101, compaction requested=true 2024-11-17T22:50:16,476 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1f4b8723a07f669c388b5143f2a00156: 2024-11-17T22:50:16,476 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=95.9 K, sizeToCheck=16.0 K 2024-11-17T22:50:16,476 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T22:50:16,476 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/c1718b0849a4472f9368c7f575bf3b29 because midkey is the same as first or last row 2024-11-17T22:50:16,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1f4b8723a07f669c388b5143f2a00156:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T22:50:16,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T22:50:16,476 DEBUG [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T22:50:16,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35999 {}] regionserver.HRegion(8855): Flush requested on 1f4b8723a07f669c388b5143f2a00156 2024-11-17T22:50:16,478 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1f4b8723a07f669c388b5143f2a00156 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-17T22:50:16,478 DEBUG [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 98169 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T22:50:16,478 DEBUG [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] regionserver.HStore(1541): 1f4b8723a07f669c388b5143f2a00156/info is initiating minor compaction (all files) 2024-11-17T22:50:16,478 INFO [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 1f4b8723a07f669c388b5143f2a00156/info in TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156. 
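
ExploringCompactionPolicy(116) reports selecting 3 contiguous files of size 98169 after considering 1 permutation with 1 in ratio. The idea behind that style of selection is to slide a window over the age-ordered store files, keep only windows where no file dwarfs the rest beyond a configured ratio, and prefer the window that compacts the most files for the least I/O. A simplified standalone sketch of that search is below; the sizes, ratio, and limits are illustrative defaults, and the real policy applies further conditions (off-peak ratios, stuck-store handling) not shown here.

    import java.util.ArrayList;
    import java.util.List;

    // Simplified sketch of an exploring-style compaction selection: examine every contiguous
    // window of store-file sizes and keep the best ratio-respecting one.
    public class ExploringSelectionSketch {

        static List<Long> select(List<Long> sizes, int minFiles, int maxFiles, double ratio) {
            List<Long> best = new ArrayList<>();
            long bestSize = Long.MAX_VALUE;
            for (int start = 0; start < sizes.size(); start++) {
                for (int end = start + minFiles; end <= Math.min(sizes.size(), start + maxFiles); end++) {
                    List<Long> window = sizes.subList(start, end);
                    if (!withinRatio(window, ratio)) continue;
                    long total = window.stream().mapToLong(Long::longValue).sum();
                    // Prefer more files, then less total I/O.
                    if (window.size() > best.size()
                            || (window.size() == best.size() && total < bestSize)) {
                        best = new ArrayList<>(window);
                        bestSize = total;
                    }
                }
            }
            return best;
        }

        // Every file must be no larger than ratio times the sum of the other files in the window.
        static boolean withinRatio(List<Long> window, double ratio) {
            long total = window.stream().mapToLong(Long::longValue).sum();
            return window.stream().allMatch(f -> f <= ratio * (total - f));
        }

        public static void main(String[] args) {
            List<Long> files = List.of(10_000L, 12_000L, 11_000L); // illustrative sizes
            System.out.println("selected " + select(files, 3, 10, 1.2));
        }
    }
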
2024-11-17T22:50:16,478 INFO [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/c1718b0849a4472f9368c7f575bf3b29, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/71482c7542e9400b8c52537bef2c3014, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/67de518b7100411e9db68911fef6277d] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp, totalSize=95.9 K 2024-11-17T22:50:16,478 DEBUG [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] compactions.Compactor(225): Compacting c1718b0849a4472f9368c7f575bf3b29, keycount=55, bloomtype=ROW, size=63.0 K, encoding=NONE, compression=NONE, seqNum=71, earliestPutTs=1731883812260 2024-11-17T22:50:16,479 DEBUG [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] compactions.Compactor(225): Compacting 71482c7542e9400b8c52537bef2c3014, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1731883814402 2024-11-17T22:50:16,479 DEBUG [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] compactions.Compactor(225): Compacting 67de518b7100411e9db68911fef6277d, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1731883816429 2024-11-17T22:50:16,482 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp/info/e3d1fc1158424abcbdafe261f4cdafe9 is 1080, key is row0078/info:/1731883816455/Put/seqid=0 2024-11-17T22:50:16,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741846_1022 (size=17894) 2024-11-17T22:50:16,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741846_1022 (size=17894) 2024-11-17T22:50:16,493 INFO [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1f4b8723a07f669c388b5143f2a00156#info#compaction#64 average throughput is 26.34 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T22:50:16,494 DEBUG [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp/info/fc203db1c0f8408d802962aa6d629465 is 1080, key is row0001/info:/1731883812260/Put/seqid=0 2024-11-17T22:50:16,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741847_1023 (size=88408) 2024-11-17T22:50:16,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741847_1023 (size=88408) 2024-11-17T22:50:16,505 DEBUG [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp/info/fc203db1c0f8408d802962aa6d629465 as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/fc203db1c0f8408d802962aa6d629465 2024-11-17T22:50:16,513 INFO [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 1f4b8723a07f669c388b5143f2a00156/info of 1f4b8723a07f669c388b5143f2a00156 into fc203db1c0f8408d802962aa6d629465(size=86.3 K), total size for store is 86.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T22:50:16,513 DEBUG [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 1f4b8723a07f669c388b5143f2a00156: 2024-11-17T22:50:16,513 INFO [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156., storeName=1f4b8723a07f669c388b5143f2a00156/info, priority=13, startTime=1731883816476; duration=0sec 2024-11-17T22:50:16,513 DEBUG [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=86.3 K, sizeToCheck=16.0 K 2024-11-17T22:50:16,513 DEBUG [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T22:50:16,513 DEBUG [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=86.3 K, sizeToCheck=16.0 K 2024-11-17T22:50:16,513 DEBUG [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T22:50:16,513 DEBUG [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=86.3 K, sizeToCheck=16.0 K 2024-11-17T22:50:16,513 DEBUG [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T22:50:16,515 DEBUG [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156., 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T22:50:16,515 DEBUG [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T22:50:16,515 DEBUG [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1f4b8723a07f669c388b5143f2a00156:info 2024-11-17T22:50:16,516 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46683 {}] assignment.AssignmentManager(1363): Split request from 1a6e40b21a48,35999,1731883800706, parent={ENCODED => 1f4b8723a07f669c388b5143f2a00156, NAME => 'TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-17T22:50:16,520 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46683 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=1a6e40b21a48,35999,1731883800706 2024-11-17T22:50:16,523 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46683 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=1f4b8723a07f669c388b5143f2a00156, daughterA=a2911dcc4e336c4898f6e0e65ebebe40, daughterB=2071f94e963317acec39cea472aff95f 2024-11-17T22:50:16,524 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=1f4b8723a07f669c388b5143f2a00156, daughterA=a2911dcc4e336c4898f6e0e65ebebe40, daughterB=2071f94e963317acec39cea472aff95f 2024-11-17T22:50:16,524 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=1f4b8723a07f669c388b5143f2a00156, daughterA=a2911dcc4e336c4898f6e0e65ebebe40, daughterB=2071f94e963317acec39cea472aff95f 2024-11-17T22:50:16,524 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=1f4b8723a07f669c388b5143f2a00156, daughterA=a2911dcc4e336c4898f6e0e65ebebe40, daughterB=2071f94e963317acec39cea472aff95f 2024-11-17T22:50:16,530 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1f4b8723a07f669c388b5143f2a00156, UNASSIGN}] 2024-11-17T22:50:16,531 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1f4b8723a07f669c388b5143f2a00156, UNASSIGN 2024-11-17T22:50:16,533 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=1f4b8723a07f669c388b5143f2a00156, regionState=CLOSING, regionLocation=1a6e40b21a48,35999,1731883800706 2024-11-17T22:50:16,535 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, 
hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1f4b8723a07f669c388b5143f2a00156, UNASSIGN because future has completed 2024-11-17T22:50:16,535 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-17T22:50:16,536 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1f4b8723a07f669c388b5143f2a00156, server=1a6e40b21a48,35999,1731883800706}] 2024-11-17T22:50:16,558 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:50:16,695 INFO [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 1f4b8723a07f669c388b5143f2a00156 2024-11-17T22:50:16,696 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-17T22:50:16,697 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 1f4b8723a07f669c388b5143f2a00156, disabling compactions & flushes 2024-11-17T22:50:16,697 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1993): waiting for 0 compactions & cache flush to complete for region TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156. 2024-11-17T22:50:16,890 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp/info/e3d1fc1158424abcbdafe261f4cdafe9 2024-11-17T22:50:16,903 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp/info/e3d1fc1158424abcbdafe261f4cdafe9 as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/e3d1fc1158424abcbdafe261f4cdafe9 2024-11-17T22:50:16,909 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/e3d1fc1158424abcbdafe261f4cdafe9, entries=12, sequenceid=116, filesize=17.5 K 2024-11-17T22:50:16,910 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=7.36 KB/7532 for 1f4b8723a07f669c388b5143f2a00156 in 433ms, sequenceid=116, compaction requested=false 2024-11-17T22:50:16,910 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1f4b8723a07f669c388b5143f2a00156: 2024-11-17T22:50:16,910 INFO [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156. 2024-11-17T22:50:16,910 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156. 2024-11-17T22:50:16,910 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156. after waiting 0 ms 2024-11-17T22:50:16,910 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156. 
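
The close sequence above (disable compactions and flushes, wait for in-flight work, take the close lock, disable updates, then flush what is left of the memstore) is essentially a drain-and-seal protocol. A bare-bones concurrency sketch of that ordering follows, with a ReadWriteLock standing in for the region's close lock; all names and numbers here are invented for illustration.

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Bare-bones sketch of the close ordering seen above: stop background work, take an
    // exclusive lock so no new updates start, then flush what remains before closing.
    public class RegionCloseSketch {

        private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();
        private volatile boolean writesDisabled = false;
        private long memstoreBytes = 7_532;   // ~7.36 KB left, as in the log

        void put(byte[] row) {
            closeLock.readLock().lock();      // normal writes share the lock
            try {
                if (writesDisabled) throw new IllegalStateException("region closing");
                memstoreBytes += row.length;
            } finally {
                closeLock.readLock().unlock();
            }
        }

        void close() {
            // 1. "Disabling compactions & flushes" and waiting for in-flight work happen first.
            closeLock.writeLock().lock();     // 2. "Acquired close lock"
            try {
                writesDisabled = true;        // 3. "Updates disabled for region"
                System.out.println("final flush of " + memstoreBytes + " bytes");
                memstoreBytes = 0;            // 4. flush remaining edits, then record the close
            } finally {
                closeLock.writeLock().unlock();
            }
        }

        public static void main(String[] args) {
            RegionCloseSketch region = new RegionCloseSketch();
            region.put("row0096".getBytes());
            region.close();
        }
    }
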
2024-11-17T22:50:16,910 INFO [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 1f4b8723a07f669c388b5143f2a00156 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-17T22:50:16,915 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp/info/1ae807f572514264873b574936688d7a is 1080, key is row0090/info:/1731883816479/Put/seqid=0 2024-11-17T22:50:16,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741848_1024 (size=12509) 2024-11-17T22:50:16,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741848_1024 (size=12509) 2024-11-17T22:50:16,921 INFO [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp/info/1ae807f572514264873b574936688d7a 2024-11-17T22:50:16,926 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/.tmp/info/1ae807f572514264873b574936688d7a as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/1ae807f572514264873b574936688d7a 2024-11-17T22:50:16,931 INFO [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/1ae807f572514264873b574936688d7a, entries=7, sequenceid=127, filesize=12.2 K 2024-11-17T22:50:16,932 INFO [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 1f4b8723a07f669c388b5143f2a00156 in 22ms, sequenceid=127, compaction requested=true 2024-11-17T22:50:16,933 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/227478b583374433b3acc436a0966d86, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/ff9ec9aac1684bd7a7cad47a680d97d5, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/f5b02f39e717456fa112c6b0cabc912e, 
hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/e52cb797151e40afab07686683800613, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/c4c80e0546a6429eaafcbe884a5384dd, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/c1718b0849a4472f9368c7f575bf3b29, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/33149c266c8541278966700bc4bf35dd, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/71482c7542e9400b8c52537bef2c3014, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/67de518b7100411e9db68911fef6277d] to archive 2024-11-17T22:50:16,934 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-17T22:50:16,936 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/227478b583374433b3acc436a0966d86 to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/227478b583374433b3acc436a0966d86 2024-11-17T22:50:16,937 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/ff9ec9aac1684bd7a7cad47a680d97d5 to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/ff9ec9aac1684bd7a7cad47a680d97d5 2024-11-17T22:50:16,938 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/f5b02f39e717456fa112c6b0cabc912e to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/f5b02f39e717456fa112c6b0cabc912e 2024-11-17T22:50:16,939 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/e52cb797151e40afab07686683800613 to 
hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/e52cb797151e40afab07686683800613 2024-11-17T22:50:16,940 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/c4c80e0546a6429eaafcbe884a5384dd to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/c4c80e0546a6429eaafcbe884a5384dd 2024-11-17T22:50:16,941 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/c1718b0849a4472f9368c7f575bf3b29 to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/c1718b0849a4472f9368c7f575bf3b29 2024-11-17T22:50:16,942 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/33149c266c8541278966700bc4bf35dd to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/33149c266c8541278966700bc4bf35dd 2024-11-17T22:50:16,943 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/71482c7542e9400b8c52537bef2c3014 to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/71482c7542e9400b8c52537bef2c3014 2024-11-17T22:50:16,943 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/67de518b7100411e9db68911fef6277d to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/67de518b7100411e9db68911fef6277d 2024-11-17T22:50:16,949 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=1 
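
On close, the store files that earlier compactions replaced are not deleted but moved to a parallel archive/ tree, and a small recovered.edits/<seqid>.seqid marker records the highest sequence id so a later open knows which WAL edits can be skipped. The sketch below shows the mirror-the-path archiving move plus the seqid marker; the paths are simplified and this is not the real HFileArchiver/WALSplitUtil code.

    import java.io.IOException;
    import java.nio.file.*;

    // Sketch of the two bookkeeping steps seen above: archive replaced store files by
    // mirroring their path under archive/, and record the max sequence id on close.
    public class CloseBookkeepingSketch {

        // data/default/<table>/<region>/info/<file> -> archive/data/default/<table>/<region>/info/<file>
        static Path archive(Path root, Path storeFile) throws IOException {
            Path relative = root.resolve("data").relativize(storeFile);
            Path target = root.resolve("archive").resolve("data").resolve(relative);
            Files.createDirectories(target.getParent());
            return Files.move(storeFile, target);
        }

        // recovered.edits/<seqid>.seqid: a marker file whose name carries the max sequence id.
        static Path writeMaxSeqId(Path regionDir, long maxSeqId) throws IOException {
            Path dir = regionDir.resolve("recovered.edits");
            Files.createDirectories(dir);
            return Files.createFile(dir.resolve(maxSeqId + ".seqid"));
        }

        public static void main(String[] args) throws IOException {
            Path root = Files.createTempDirectory("hbase-root");
            Path store = root.resolve("data/default/TestTable/region1/info");
            Files.createDirectories(store);
            Path hfile = Files.write(store.resolve("oldfile"), new byte[]{0});

            System.out.println("archived to " + archive(root, hfile));
            System.out.println("seqid marker " + writeMaxSeqId(root.resolve("data/default/TestTable/region1"), 130));
        }
    }
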
2024-11-17T22:50:16,950 INFO [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156. 2024-11-17T22:50:16,950 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 1f4b8723a07f669c388b5143f2a00156: Waiting for close lock at 1731883816697Running coprocessor pre-close hooks at 1731883816697Disabling compacts and flushes for region at 1731883816697Disabling writes for close at 1731883816910 (+213 ms)Obtaining lock to block concurrent updates at 1731883816910Preparing flush snapshotting stores in 1f4b8723a07f669c388b5143f2a00156 at 1731883816910Finished memstore snapshotting TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156., syncing WAL and waiting on mvcc, flushsize=dataSize=7532, getHeapSize=8304, getOffHeapSize=0, getCellsCount=7 at 1731883816911 (+1 ms)Flushing stores of TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156. at 1731883816911Flushing 1f4b8723a07f669c388b5143f2a00156/info: creating writer at 1731883816911Flushing 1f4b8723a07f669c388b5143f2a00156/info: appending metadata at 1731883816914 (+3 ms)Flushing 1f4b8723a07f669c388b5143f2a00156/info: closing flushed file at 1731883816915 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f4c6444: reopening flushed file at 1731883816926 (+11 ms)Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 1f4b8723a07f669c388b5143f2a00156 in 22ms, sequenceid=127, compaction requested=true at 1731883816932 (+6 ms)Writing region close event to WAL at 1731883816946 (+14 ms)Running coprocessor post-close hooks at 1731883816950 (+4 ms)Closed at 1731883816950 2024-11-17T22:50:16,952 INFO [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 1f4b8723a07f669c388b5143f2a00156 2024-11-17T22:50:16,953 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=1f4b8723a07f669c388b5143f2a00156, regionState=CLOSED 2024-11-17T22:50:16,955 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1f4b8723a07f669c388b5143f2a00156, server=1a6e40b21a48,35999,1731883800706 because future has completed 2024-11-17T22:50:16,959 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-17T22:50:16,959 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 1f4b8723a07f669c388b5143f2a00156, server=1a6e40b21a48,35999,1731883800706 in 420 msec 2024-11-17T22:50:16,961 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-17T22:50:16,961 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1f4b8723a07f669c388b5143f2a00156, UNASSIGN in 429 msec 2024-11-17T22:50:16,969 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:50:16,972 INFO 
[PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 3 storefiles, region=1f4b8723a07f669c388b5143f2a00156, threads=3 2024-11-17T22:50:16,974 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/e3d1fc1158424abcbdafe261f4cdafe9 for region: 1f4b8723a07f669c388b5143f2a00156 2024-11-17T22:50:16,974 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/fc203db1c0f8408d802962aa6d629465 for region: 1f4b8723a07f669c388b5143f2a00156 2024-11-17T22:50:16,974 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/1ae807f572514264873b574936688d7a for region: 1f4b8723a07f669c388b5143f2a00156 2024-11-17T22:50:16,983 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/1ae807f572514264873b574936688d7a, top=true 2024-11-17T22:50:16,983 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/e3d1fc1158424abcbdafe261f4cdafe9, top=true 2024-11-17T22:50:16,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741849_1025 (size=27) 2024-11-17T22:50:16,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741849_1025 (size=27) 2024-11-17T22:50:16,992 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/TestLogRolling-testLogRolling=1f4b8723a07f669c388b5143f2a00156-1ae807f572514264873b574936688d7a for child: 2071f94e963317acec39cea472aff95f, parent: 1f4b8723a07f669c388b5143f2a00156 2024-11-17T22:50:16,992 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/1ae807f572514264873b574936688d7a for region: 1f4b8723a07f669c388b5143f2a00156 2024-11-17T22:50:16,994 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/TestLogRolling-testLogRolling=1f4b8723a07f669c388b5143f2a00156-e3d1fc1158424abcbdafe261f4cdafe9 for child: 
2071f94e963317acec39cea472aff95f, parent: 1f4b8723a07f669c388b5143f2a00156 2024-11-17T22:50:16,994 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/e3d1fc1158424abcbdafe261f4cdafe9 for region: 1f4b8723a07f669c388b5143f2a00156 2024-11-17T22:50:17,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741850_1026 (size=27) 2024-11-17T22:50:17,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741850_1026 (size=27) 2024-11-17T22:50:17,004 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/fc203db1c0f8408d802962aa6d629465 for region: 1f4b8723a07f669c388b5143f2a00156 2024-11-17T22:50:17,007 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 1f4b8723a07f669c388b5143f2a00156 Daughter A: [hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/a2911dcc4e336c4898f6e0e65ebebe40/info/fc203db1c0f8408d802962aa6d629465.1f4b8723a07f669c388b5143f2a00156] storefiles, Daughter B: [hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/TestLogRolling-testLogRolling=1f4b8723a07f669c388b5143f2a00156-1ae807f572514264873b574936688d7a, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/TestLogRolling-testLogRolling=1f4b8723a07f669c388b5143f2a00156-e3d1fc1158424abcbdafe261f4cdafe9, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/fc203db1c0f8408d802962aa6d629465.1f4b8723a07f669c388b5143f2a00156] storefiles. 
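
The entries above show SplitTableRegionProcedure (pid=7) turning the parent's store files into reference and HFileLink files for daughters a2911dcc4e336c4898f6e0e65ebebe40 and 2071f94e963317acec39cea472aff95f rather than copying any data. In this test the split is triggered by the region server's split policy, but as a hedged sketch the same split can be requested from a client; the split key row0062 is taken from the daughter boundary visible in the log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestSplit {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
      // Ask the master to split at an explicit key; on the master this is
      // carried out by a SplitTableRegionProcedure like the one logged here.
      admin.split(table, Bytes.toBytes("row0062"));
    }
  }
}
```
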
2024-11-17T22:50:17,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741851_1027 (size=71) 2024-11-17T22:50:17,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741851_1027 (size=71) 2024-11-17T22:50:17,018 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:50:17,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741852_1028 (size=71) 2024-11-17T22:50:17,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741852_1028 (size=71) 2024-11-17T22:50:17,033 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:50:17,043 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/a2911dcc4e336c4898f6e0e65ebebe40/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-11-17T22:50:17,045 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-11-17T22:50:17,047 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731883817047"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731883817047"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731883817047"}]},"ts":"1731883817047"} 2024-11-17T22:50:17,047 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731883816520.a2911dcc4e336c4898f6e0e65ebebe40.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731883817047"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731883817047"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731883817047"}]},"ts":"1731883817047"} 2024-11-17T22:50:17,047 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731883817047"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731883817047"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731883817047"}]},"ts":"1731883817047"} 2024-11-17T22:50:17,064 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a2911dcc4e336c4898f6e0e65ebebe40, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=2071f94e963317acec39cea472aff95f, ASSIGN}] 2024-11-17T22:50:17,065 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=2071f94e963317acec39cea472aff95f, ASSIGN 2024-11-17T22:50:17,065 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a2911dcc4e336c4898f6e0e65ebebe40, ASSIGN 2024-11-17T22:50:17,066 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=2071f94e963317acec39cea472aff95f, ASSIGN; state=SPLITTING_NEW, location=1a6e40b21a48,35999,1731883800706; forceNewPlan=false, retain=false 2024-11-17T22:50:17,066 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a2911dcc4e336c4898f6e0e65ebebe40, ASSIGN; state=SPLITTING_NEW, location=1a6e40b21a48,35999,1731883800706; forceNewPlan=false, retain=false 2024-11-17T22:50:17,144 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:17,144 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:50:17,217 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=2071f94e963317acec39cea472aff95f, regionState=OPENING, regionLocation=1a6e40b21a48,35999,1731883800706 2024-11-17T22:50:17,217 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=a2911dcc4e336c4898f6e0e65ebebe40, regionState=OPENING, regionLocation=1a6e40b21a48,35999,1731883800706 2024-11-17T22:50:17,220 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=2071f94e963317acec39cea472aff95f, ASSIGN because future has completed 2024-11-17T22:50:17,220 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2071f94e963317acec39cea472aff95f, server=1a6e40b21a48,35999,1731883800706}] 2024-11-17T22:50:17,221 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a2911dcc4e336c4898f6e0e65ebebe40, ASSIGN because future has completed 2024-11-17T22:50:17,222 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure a2911dcc4e336c4898f6e0e65ebebe40, server=1a6e40b21a48,35999,1731883800706}] 2024-11-17T22:50:17,383 INFO [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731883816520.a2911dcc4e336c4898f6e0e65ebebe40. 
2024-11-17T22:50:17,384 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => a2911dcc4e336c4898f6e0e65ebebe40, NAME => 'TestLogRolling-testLogRolling,,1731883816520.a2911dcc4e336c4898f6e0e65ebebe40.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-17T22:50:17,384 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling a2911dcc4e336c4898f6e0e65ebebe40 2024-11-17T22:50:17,384 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731883816520.a2911dcc4e336c4898f6e0e65ebebe40.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:50:17,384 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for a2911dcc4e336c4898f6e0e65ebebe40 2024-11-17T22:50:17,384 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for a2911dcc4e336c4898f6e0e65ebebe40 2024-11-17T22:50:17,386 INFO [StoreOpener-a2911dcc4e336c4898f6e0e65ebebe40-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region a2911dcc4e336c4898f6e0e65ebebe40 2024-11-17T22:50:17,387 INFO [StoreOpener-a2911dcc4e336c4898f6e0e65ebebe40-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a2911dcc4e336c4898f6e0e65ebebe40 columnFamilyName info 2024-11-17T22:50:17,387 DEBUG [StoreOpener-a2911dcc4e336c4898f6e0e65ebebe40-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:50:17,405 DEBUG [StoreOpener-a2911dcc4e336c4898f6e0e65ebebe40-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/a2911dcc4e336c4898f6e0e65ebebe40/info/fc203db1c0f8408d802962aa6d629465.1f4b8723a07f669c388b5143f2a00156->hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/fc203db1c0f8408d802962aa6d629465-bottom 2024-11-17T22:50:17,405 INFO [StoreOpener-a2911dcc4e336c4898f6e0e65ebebe40-1 {}] regionserver.HStore(327): Store=a2911dcc4e336c4898f6e0e65ebebe40/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T22:50:17,406 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for a2911dcc4e336c4898f6e0e65ebebe40 2024-11-17T22:50:17,407 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/a2911dcc4e336c4898f6e0e65ebebe40 2024-11-17T22:50:17,408 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/a2911dcc4e336c4898f6e0e65ebebe40 2024-11-17T22:50:17,409 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for a2911dcc4e336c4898f6e0e65ebebe40 2024-11-17T22:50:17,409 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for a2911dcc4e336c4898f6e0e65ebebe40 2024-11-17T22:50:17,411 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for a2911dcc4e336c4898f6e0e65ebebe40 2024-11-17T22:50:17,412 INFO [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened a2911dcc4e336c4898f6e0e65ebebe40; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=821687, jitterRate=0.044829487800598145}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-17T22:50:17,412 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a2911dcc4e336c4898f6e0e65ebebe40 2024-11-17T22:50:17,412 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for a2911dcc4e336c4898f6e0e65ebebe40: Running coprocessor pre-open hook at 1731883817384Writing region info on filesystem at 1731883817384Initializing all the Stores at 1731883817385 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883817385Cleaning up temporary data from old regions at 1731883817409 (+24 ms)Running coprocessor post-open hooks at 1731883817412 (+3 ms)Region opened successfully at 1731883817412 2024-11-17T22:50:17,413 INFO [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731883816520.a2911dcc4e336c4898f6e0e65ebebe40., pid=13, masterSystemTime=1731883817374 2024-11-17T22:50:17,414 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 
a2911dcc4e336c4898f6e0e65ebebe40:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T22:50:17,414 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-17T22:50:17,414 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T22:50:17,414 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731883816520.a2911dcc4e336c4898f6e0e65ebebe40. 2024-11-17T22:50:17,414 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HStore(1541): a2911dcc4e336c4898f6e0e65ebebe40/info is initiating minor compaction (all files) 2024-11-17T22:50:17,414 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a2911dcc4e336c4898f6e0e65ebebe40/info in TestLogRolling-testLogRolling,,1731883816520.a2911dcc4e336c4898f6e0e65ebebe40. 2024-11-17T22:50:17,414 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/a2911dcc4e336c4898f6e0e65ebebe40/info/fc203db1c0f8408d802962aa6d629465.1f4b8723a07f669c388b5143f2a00156->hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/fc203db1c0f8408d802962aa6d629465-bottom] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/a2911dcc4e336c4898f6e0e65ebebe40/.tmp, totalSize=86.3 K 2024-11-17T22:50:17,415 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.Compactor(225): Compacting fc203db1c0f8408d802962aa6d629465.1f4b8723a07f669c388b5143f2a00156, keycount=38, bloomtype=ROW, size=86.3 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1731883812260 2024-11-17T22:50:17,416 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731883816520.a2911dcc4e336c4898f6e0e65ebebe40. 2024-11-17T22:50:17,416 INFO [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731883816520.a2911dcc4e336c4898f6e0e65ebebe40. 2024-11-17T22:50:17,416 INFO [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f. 
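
Right after opening, daughter a2911dcc4e336c4898f6e0e65ebebe40 immediately compacts its single bottom-half reference file so it no longer depends on the parent's store file. That happens automatically, but as an illustrative sketch a compaction can also be requested and waited on from a client; the polling loop below is an assumption, not something this test does:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class WaitForCompaction {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Request a major compaction of all regions of the table.
      admin.majorCompact(table);
      // Poll until the region servers report no compaction in progress.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(1000);
      }
    }
  }
}
```
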
2024-11-17T22:50:17,416 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 2071f94e963317acec39cea472aff95f, NAME => 'TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-17T22:50:17,417 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 2071f94e963317acec39cea472aff95f 2024-11-17T22:50:17,417 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=a2911dcc4e336c4898f6e0e65ebebe40, regionState=OPEN, openSeqNum=131, regionLocation=1a6e40b21a48,35999,1731883800706 2024-11-17T22:50:17,417 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:50:17,417 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 2071f94e963317acec39cea472aff95f 2024-11-17T22:50:17,417 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 2071f94e963317acec39cea472aff95f 2024-11-17T22:50:17,418 INFO [StoreOpener-2071f94e963317acec39cea472aff95f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 2071f94e963317acec39cea472aff95f 2024-11-17T22:50:17,419 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35999 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-17T22:50:17,419 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
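
1588230740 is the encoded region name of hbase:meta, which is being flushed here because the split just wrote new regioninfo, splitA/splitB and state entries into it. A hedged sketch of forcing the same flush from a client:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushMeta {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Writes the meta memstore out to info/ns/table HFiles, just as the
      // MemStoreFlusher does in the entries above.
      admin.flush(TableName.META_TABLE_NAME);
    }
  }
}
```
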
2024-11-17T22:50:17,419 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-17T22:50:17,419 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure a2911dcc4e336c4898f6e0e65ebebe40, server=1a6e40b21a48,35999,1731883800706 because future has completed 2024-11-17T22:50:17,419 INFO [StoreOpener-2071f94e963317acec39cea472aff95f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2071f94e963317acec39cea472aff95f columnFamilyName info 2024-11-17T22:50:17,419 DEBUG [StoreOpener-2071f94e963317acec39cea472aff95f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:50:17,423 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=10 2024-11-17T22:50:17,423 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure a2911dcc4e336c4898f6e0e65ebebe40, server=1a6e40b21a48,35999,1731883800706 in 198 msec 2024-11-17T22:50:17,424 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a2911dcc4e336c4898f6e0e65ebebe40, ASSIGN in 359 msec 2024-11-17T22:50:17,429 DEBUG [StoreOpener-2071f94e963317acec39cea472aff95f-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/TestLogRolling-testLogRolling=1f4b8723a07f669c388b5143f2a00156-1ae807f572514264873b574936688d7a 2024-11-17T22:50:17,433 DEBUG [StoreOpener-2071f94e963317acec39cea472aff95f-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/TestLogRolling-testLogRolling=1f4b8723a07f669c388b5143f2a00156-e3d1fc1158424abcbdafe261f4cdafe9 2024-11-17T22:50:17,436 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/hbase/meta/1588230740/.tmp/info/f88b93933b1a48c0bfc90fb8656fc801 is 193, key is TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f./info:regioninfo/1731883817217/Put/seqid=0 2024-11-17T22:50:17,437 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a2911dcc4e336c4898f6e0e65ebebe40#info#compaction#66 average throughput is 20.87 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T22:50:17,438 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/a2911dcc4e336c4898f6e0e65ebebe40/.tmp/info/790cd66381e24994aabb2f56eda43e22 is 1080, key is row0001/info:/1731883812260/Put/seqid=0 2024-11-17T22:50:17,440 DEBUG [StoreOpener-2071f94e963317acec39cea472aff95f-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/fc203db1c0f8408d802962aa6d629465.1f4b8723a07f669c388b5143f2a00156->hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/fc203db1c0f8408d802962aa6d629465-top 2024-11-17T22:50:17,440 INFO [StoreOpener-2071f94e963317acec39cea472aff95f-1 {}] regionserver.HStore(327): Store=2071f94e963317acec39cea472aff95f/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T22:50:17,441 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 2071f94e963317acec39cea472aff95f 2024-11-17T22:50:17,442 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f 2024-11-17T22:50:17,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741853_1029 (size=9847) 2024-11-17T22:50:17,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741853_1029 (size=9847) 2024-11-17T22:50:17,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741854_1030 (size=70862) 2024-11-17T22:50:17,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741854_1030 (size=70862) 2024-11-17T22:50:17,443 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/hbase/meta/1588230740/.tmp/info/f88b93933b1a48c0bfc90fb8656fc801 2024-11-17T22:50:17,443 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f 2024-11-17T22:50:17,444 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 2071f94e963317acec39cea472aff95f 2024-11-17T22:50:17,444 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] 
regionserver.HRegion(1060): Cleaning up temporary data for 2071f94e963317acec39cea472aff95f 2024-11-17T22:50:17,445 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 2071f94e963317acec39cea472aff95f 2024-11-17T22:50:17,447 INFO [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 2071f94e963317acec39cea472aff95f; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=733828, jitterRate=-0.06689050793647766}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-17T22:50:17,447 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2071f94e963317acec39cea472aff95f 2024-11-17T22:50:17,447 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 2071f94e963317acec39cea472aff95f: Running coprocessor pre-open hook at 1731883817417Writing region info on filesystem at 1731883817417Initializing all the Stores at 1731883817418 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883817418Cleaning up temporary data from old regions at 1731883817444 (+26 ms)Running coprocessor post-open hooks at 1731883817447 (+3 ms)Region opened successfully at 1731883817447 2024-11-17T22:50:17,448 INFO [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f., pid=12, masterSystemTime=1731883817374 2024-11-17T22:50:17,448 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 2071f94e963317acec39cea472aff95f:info, priority=-2147483648, current under compaction store size is 2 2024-11-17T22:50:17,448 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T22:50:17,448 DEBUG [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T22:50:17,449 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/a2911dcc4e336c4898f6e0e65ebebe40/.tmp/info/790cd66381e24994aabb2f56eda43e22 as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/a2911dcc4e336c4898f6e0e65ebebe40/info/790cd66381e24994aabb2f56eda43e22 2024-11-17T22:50:17,449 INFO [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to 
-2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f. 2024-11-17T22:50:17,449 DEBUG [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] regionserver.HStore(1541): 2071f94e963317acec39cea472aff95f/info is initiating minor compaction (all files) 2024-11-17T22:50:17,450 INFO [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 2071f94e963317acec39cea472aff95f/info in TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f. 2024-11-17T22:50:17,450 INFO [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/fc203db1c0f8408d802962aa6d629465.1f4b8723a07f669c388b5143f2a00156->hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/fc203db1c0f8408d802962aa6d629465-top, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/TestLogRolling-testLogRolling=1f4b8723a07f669c388b5143f2a00156-e3d1fc1158424abcbdafe261f4cdafe9, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/TestLogRolling-testLogRolling=1f4b8723a07f669c388b5143f2a00156-1ae807f572514264873b574936688d7a] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp, totalSize=116.0 K 2024-11-17T22:50:17,450 DEBUG [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] compactions.Compactor(225): Compacting fc203db1c0f8408d802962aa6d629465.1f4b8723a07f669c388b5143f2a00156, keycount=38, bloomtype=ROW, size=86.3 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1731883812260 2024-11-17T22:50:17,451 DEBUG [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=1f4b8723a07f669c388b5143f2a00156-e3d1fc1158424abcbdafe261f4cdafe9, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1731883816455 2024-11-17T22:50:17,451 DEBUG [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=1f4b8723a07f669c388b5143f2a00156-1ae807f572514264873b574936688d7a, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1731883816479 2024-11-17T22:50:17,451 DEBUG [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f. 2024-11-17T22:50:17,451 INFO [RS_OPEN_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f. 
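
At this point both daughters are open: a2911dcc4e336c4898f6e0e65ebebe40 serves [ '', 'row0062' ) and 2071f94e963317acec39cea472aff95f serves [ 'row0062', '' ). As a sketch (not part of the test), the new region layout could be confirmed from a client with RegionLocator:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class ShowDaughterRegions {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(table)) {
      // After the split there should be two regions, both hosted by the same
      // region server in this single-server test cluster.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getRegionNameAsString()
            + " start=" + Bytes.toStringBinary(loc.getRegion().getStartKey())
            + " end=" + Bytes.toStringBinary(loc.getRegion().getEndKey())
            + " on " + loc.getServerName());
      }
    }
  }
}
```
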
2024-11-17T22:50:17,452 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=2071f94e963317acec39cea472aff95f, regionState=OPEN, openSeqNum=131, regionLocation=1a6e40b21a48,35999,1731883800706 2024-11-17T22:50:17,455 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2071f94e963317acec39cea472aff95f, server=1a6e40b21a48,35999,1731883800706 because future has completed 2024-11-17T22:50:17,458 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in a2911dcc4e336c4898f6e0e65ebebe40/info of a2911dcc4e336c4898f6e0e65ebebe40 into 790cd66381e24994aabb2f56eda43e22(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T22:50:17,458 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a2911dcc4e336c4898f6e0e65ebebe40: 2024-11-17T22:50:17,458 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731883816520.a2911dcc4e336c4898f6e0e65ebebe40., storeName=a2911dcc4e336c4898f6e0e65ebebe40/info, priority=15, startTime=1731883817413; duration=0sec 2024-11-17T22:50:17,458 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T22:50:17,458 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a2911dcc4e336c4898f6e0e65ebebe40:info 2024-11-17T22:50:17,460 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-17T22:50:17,460 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 2071f94e963317acec39cea472aff95f, server=1a6e40b21a48,35999,1731883800706 in 237 msec 2024-11-17T22:50:17,463 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-17T22:50:17,463 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=2071f94e963317acec39cea472aff95f, ASSIGN in 396 msec 2024-11-17T22:50:17,465 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=1f4b8723a07f669c388b5143f2a00156, daughterA=a2911dcc4e336c4898f6e0e65ebebe40, daughterB=2071f94e963317acec39cea472aff95f in 943 msec 2024-11-17T22:50:17,466 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/hbase/meta/1588230740/.tmp/ns/c8b025ba372f447a806a214f3a25b414 is 43, key is default/ns:d/1731883802085/Put/seqid=0 2024-11-17T22:50:17,478 INFO [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2071f94e963317acec39cea472aff95f#info#compaction#69 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T22:50:17,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741855_1031 (size=5153) 2024-11-17T22:50:17,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741855_1031 (size=5153) 2024-11-17T22:50:17,479 DEBUG [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/34d9f4a1a6524f01a7f7665edc6a6ed3 is 1080, key is row0062/info:/1731883814417/Put/seqid=0 2024-11-17T22:50:17,479 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/hbase/meta/1588230740/.tmp/ns/c8b025ba372f447a806a214f3a25b414 2024-11-17T22:50:17,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741856_1032 (size=42984) 2024-11-17T22:50:17,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741856_1032 (size=42984) 2024-11-17T22:50:17,489 DEBUG [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/34d9f4a1a6524f01a7f7665edc6a6ed3 as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/34d9f4a1a6524f01a7f7665edc6a6ed3 2024-11-17T22:50:17,496 INFO [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 2071f94e963317acec39cea472aff95f/info of 2071f94e963317acec39cea472aff95f into 34d9f4a1a6524f01a7f7665edc6a6ed3(size=42.0 K), total size for store is 42.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
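
With pid=7 finished, the split is complete from the master's point of view; clients that still cache the parent region's location only find out lazily, which is what the NotServingRegionException and AsyncRegionLocatorHelper entries further below show. A hedged sketch of such a client write after the split, where the row, qualifier and value are made up:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteAfterSplit {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table t = conn.getTable(table)) {
      // If the client's cached location still points at the closed parent, the
      // server answers with NotServingRegionException; the client then drops
      // the stale entry, re-reads hbase:meta and retries against the daughter.
      Put put = new Put(Bytes.toBytes("row0100"));
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      t.put(put);
    }
  }
}
```
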
2024-11-17T22:50:17,496 DEBUG [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 2071f94e963317acec39cea472aff95f: 2024-11-17T22:50:17,496 INFO [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f., storeName=2071f94e963317acec39cea472aff95f/info, priority=13, startTime=1731883817448; duration=0sec 2024-11-17T22:50:17,496 DEBUG [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T22:50:17,496 DEBUG [RS:0;1a6e40b21a48:35999-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2071f94e963317acec39cea472aff95f:info 2024-11-17T22:50:17,499 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/hbase/meta/1588230740/.tmp/table/b33e3929480f40adb3cd75effe9480ce is 65, key is TestLogRolling-testLogRolling/table:state/1731883802548/Put/seqid=0 2024-11-17T22:50:17,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741857_1033 (size=5340) 2024-11-17T22:50:17,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741857_1033 (size=5340) 2024-11-17T22:50:17,504 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/hbase/meta/1588230740/.tmp/table/b33e3929480f40adb3cd75effe9480ce 2024-11-17T22:50:17,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/hbase/meta/1588230740/.tmp/info/f88b93933b1a48c0bfc90fb8656fc801 as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/hbase/meta/1588230740/info/f88b93933b1a48c0bfc90fb8656fc801 2024-11-17T22:50:17,514 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/hbase/meta/1588230740/info/f88b93933b1a48c0bfc90fb8656fc801, entries=30, sequenceid=17, filesize=9.6 K 2024-11-17T22:50:17,515 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/hbase/meta/1588230740/.tmp/ns/c8b025ba372f447a806a214f3a25b414 as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/hbase/meta/1588230740/ns/c8b025ba372f447a806a214f3a25b414 2024-11-17T22:50:17,520 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/hbase/meta/1588230740/ns/c8b025ba372f447a806a214f3a25b414, entries=2, sequenceid=17, filesize=5.0 K 2024-11-17T22:50:17,521 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/hbase/meta/1588230740/.tmp/table/b33e3929480f40adb3cd75effe9480ce as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/hbase/meta/1588230740/table/b33e3929480f40adb3cd75effe9480ce 2024-11-17T22:50:17,526 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/hbase/meta/1588230740/table/b33e3929480f40adb3cd75effe9480ce, entries=2, sequenceid=17, filesize=5.2 K 2024-11-17T22:50:17,527 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 108ms, sequenceid=17, compaction requested=false 2024-11-17T22:50:17,527 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-17T22:50:17,558 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
2024-11-17T22:50:18,145 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392
2024-11-17T22:50:18,145 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta
2024-11-17T22:50:18,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35999 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:42766 deadline: 1731883828493, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156. is not online on 1a6e40b21a48,35999,1731883800706
2024-11-17T22:50:18,520 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156., hostname=1a6e40b21a48,35999,1731883800706, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156., hostname=1a6e40b21a48,35999,1731883800706, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156. is not online on 1a6e40b21a48,35999,1731883800706
    at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-17T22:50:18,520 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156., hostname=1a6e40b21a48,35999,1731883800706, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156. is not online on 1a6e40b21a48,35999,1731883800706
2024-11-17T22:50:18,520 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731883802171.1f4b8723a07f669c388b5143f2a00156., hostname=1a6e40b21a48,35999,1731883800706, seqNum=2 from cache
2024-11-17T22:50:18,559 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335
2024-11-17T22:50:19,145 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta
2024-11-17T22:50:19,145 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392
2024-11-17T22:50:19,559 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335
2024-11-17T22:50:20,146 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta
2024-11-17T22:50:20,146 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392
2024-11-17T22:50:20,560 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335
2024-11-17T22:50:21,146 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta
2024-11-17T22:50:21,146 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392
2024-11-17T22:50:21,561 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335
2024-11-17T22:50:21,950 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-17T22:50:21,951 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:21,951 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:21,951 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:21,951 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:21,951 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:21,952 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:21,952 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:21,987 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:21,988 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:21,988 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:21,988 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:21,988 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:21,989 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:21,994 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:21,995 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:21,995 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:21,998 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:22,147 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392
2024-11-17T22:50:22,147 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta
2024-11-17T22:50:22,508 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-17T22:50:22,508 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:22,509 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:22,509 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:22,509 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:22,509 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:22,509 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:22,510 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:22,510 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:22,531 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:22,531 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:22,531 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:22,531 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:22,532 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:22,532 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:22,535 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:22,535 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:22,535 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:22,537 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
2024-11-17T22:50:22,562 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335
2024-11-17T22:50:23,148 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta
2024-11-17T22:50:23,148 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392
2024-11-17T22:50:23,562 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335
2024-11-17T22:50:24,149 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392
2024-11-17T22:50:24,149 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta
2024-11-17T22:50:24,563 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335
2024-11-17T22:50:25,150 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta
2024-11-17T22:50:25,150 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392
2024-11-17T22:50:25,564 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335
2024-11-17T22:50:26,151 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
    ...
11 more 2024-11-17T22:50:26,151 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:26,565 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:27,152 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:50:27,152 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:27,566 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:28,153 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:50:28,153 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:28,567 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:28,595 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f., hostname=1a6e40b21a48,35999,1731883800706, seqNum=131] 2024-11-17T22:50:28,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35999 {}] regionserver.HRegion(8855): Flush requested on 2071f94e963317acec39cea472aff95f 2024-11-17T22:50:28,606 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2071f94e963317acec39cea472aff95f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-17T22:50:28,611 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/a5b3dacc87de4626b02559365c2b1c05 is 1080, key is row0097/info:/1731883828596/Put/seqid=0 2024-11-17T22:50:28,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741858_1034 (size=12516) 2024-11-17T22:50:28,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741858_1034 (size=12516) 2024-11-17T22:50:28,617 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=141 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/a5b3dacc87de4626b02559365c2b1c05 2024-11-17T22:50:28,625 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/a5b3dacc87de4626b02559365c2b1c05 as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/a5b3dacc87de4626b02559365c2b1c05 2024-11-17T22:50:28,631 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/a5b3dacc87de4626b02559365c2b1c05, entries=7, sequenceid=141, filesize=12.2 K 2024-11-17T22:50:28,631 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for 2071f94e963317acec39cea472aff95f in 25ms, sequenceid=141, compaction requested=false 2024-11-17T22:50:28,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2071f94e963317acec39cea472aff95f: 2024-11-17T22:50:28,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35999 {}] regionserver.HRegion(8855): Flush requested on 2071f94e963317acec39cea472aff95f 2024-11-17T22:50:28,632 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2071f94e963317acec39cea472aff95f 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-17T22:50:28,636 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/54953a3ea39944c287cd7c73695bb247 is 1080, key is row0104/info:/1731883828607/Put/seqid=0 2024-11-17T22:50:28,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741859_1035 (size=19000) 2024-11-17T22:50:28,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741859_1035 (size=19000) 2024-11-17T22:50:28,642 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/54953a3ea39944c287cd7c73695bb247 2024-11-17T22:50:28,648 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/54953a3ea39944c287cd7c73695bb247 as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/54953a3ea39944c287cd7c73695bb247 2024-11-17T22:50:28,653 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/54953a3ea39944c287cd7c73695bb247, entries=13, sequenceid=157, filesize=18.6 K 2024-11-17T22:50:28,654 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=11.56 KB/11836 for 2071f94e963317acec39cea472aff95f in 22ms, sequenceid=157, compaction requested=true 2024-11-17T22:50:28,654 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2071f94e963317acec39cea472aff95f: 2024-11-17T22:50:28,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2071f94e963317acec39cea472aff95f:info, priority=-2147483648, current under compaction store size is 1 
2024-11-17T22:50:28,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T22:50:28,655 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T22:50:28,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35999 {}] regionserver.HRegion(8855): Flush requested on 2071f94e963317acec39cea472aff95f 2024-11-17T22:50:28,655 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2071f94e963317acec39cea472aff95f 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-17T22:50:28,656 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 74500 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T22:50:28,656 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HStore(1541): 2071f94e963317acec39cea472aff95f/info is initiating minor compaction (all files) 2024-11-17T22:50:28,656 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 2071f94e963317acec39cea472aff95f/info in TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f. 2024-11-17T22:50:28,656 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/34d9f4a1a6524f01a7f7665edc6a6ed3, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/a5b3dacc87de4626b02559365c2b1c05, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/54953a3ea39944c287cd7c73695bb247] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp, totalSize=72.8 K 2024-11-17T22:50:28,656 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.Compactor(225): Compacting 34d9f4a1a6524f01a7f7665edc6a6ed3, keycount=35, bloomtype=ROW, size=42.0 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1731883814417 2024-11-17T22:50:28,657 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.Compactor(225): Compacting a5b3dacc87de4626b02559365c2b1c05, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1731883828596 2024-11-17T22:50:28,657 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.Compactor(225): Compacting 54953a3ea39944c287cd7c73695bb247, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1731883828607 2024-11-17T22:50:28,659 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/67852e8db3e04b9a99ef00275c917bae is 1080, key is row0117/info:/1731883828633/Put/seqid=0 
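The records above show the region server's ExploringCompactionPolicy selecting 3 store files (72.8 K total) of 2071f94e963317acec39cea472aff95f/info for a minor compaction while further flushes are still queued. For context, a minimal client-side sketch of requesting such a compaction through the HBase Admin API and waiting for it to drain follows; the table name is taken from the log, the connection configuration is assumed, and this is illustrative only, since the selection logged here is triggered internally by MemStoreFlusher rather than by a client.

// Sketch, not the test's code: ask for a minor compaction of the table seen in
// the log and poll until the region server reports no compaction in progress.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class RequestCompaction {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();           // hbase-site.xml assumed on the classpath
    TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.compact(table);                                      // queue a minor compaction
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(1000);                                      // wait for the CompactSplit threads to finish
      }
    }
  }
}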
2024-11-17T22:50:28,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741860_1036 (size=17906) 2024-11-17T22:50:28,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741860_1036 (size=17906) 2024-11-17T22:50:28,667 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/67852e8db3e04b9a99ef00275c917bae 2024-11-17T22:50:28,670 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2071f94e963317acec39cea472aff95f#info#compaction#74 average throughput is 56.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T22:50:28,671 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/6a3c59d0b6c344c2a0d8ea2a398436be is 1080, key is row0062/info:/1731883814417/Put/seqid=0 2024-11-17T22:50:28,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741861_1037 (size=64714) 2024-11-17T22:50:28,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741861_1037 (size=64714) 2024-11-17T22:50:28,677 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/67852e8db3e04b9a99ef00275c917bae as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/67852e8db3e04b9a99ef00275c917bae 2024-11-17T22:50:28,682 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/67852e8db3e04b9a99ef00275c917bae, entries=12, sequenceid=172, filesize=17.5 K 2024-11-17T22:50:28,683 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=0 B/0 for 2071f94e963317acec39cea472aff95f in 28ms, sequenceid=172, compaction requested=false 2024-11-17T22:50:28,683 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2071f94e963317acec39cea472aff95f: 2024-11-17T22:50:28,683 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/6a3c59d0b6c344c2a0d8ea2a398436be as 
hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/6a3c59d0b6c344c2a0d8ea2a398436be 2024-11-17T22:50:28,689 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 2071f94e963317acec39cea472aff95f/info of 2071f94e963317acec39cea472aff95f into 6a3c59d0b6c344c2a0d8ea2a398436be(size=63.2 K), total size for store is 80.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T22:50:28,690 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 2071f94e963317acec39cea472aff95f: 2024-11-17T22:50:28,690 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f., storeName=2071f94e963317acec39cea472aff95f/info, priority=13, startTime=1731883828654; duration=0sec 2024-11-17T22:50:28,690 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T22:50:28,690 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2071f94e963317acec39cea472aff95f:info 2024-11-17T22:50:29,154 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:29,154 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:29,568 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:30,155 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:30,155 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:30,569 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:30,627 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details. 
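The recurring "Failed invocation ... InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed" entries above come from RecoverLeaseFSUtils probing DistributedFileSystem.isFileClosed through reflection after the mini-cluster's DFS client has already been shut down, so every retry wraps the underlying IOException in an InvocationTargetException. A minimal sketch of that reflective call-and-unwrap pattern is below; it illustrates what the stack traces show and is not HBase's actual implementation, and the class name is invented.

// Sketch of the pattern visible in the WARN stack traces: call isFileClosed
// reflectively (the method exists on DistributedFileSystem, not on FileSystem)
// and surface the real cause when the invocation target throws.
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class IsFileClosedProbe {
  static boolean isFileClosed(FileSystem fs, Path path) throws IOException {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      return false;                                   // filesystem has no isFileClosed support
    } catch (InvocationTargetException e) {
      // In the log above this wraps IOException("Filesystem closed") thrown by
      // DFSClient.checkOpen once the mini-cluster client is shut down.
      if (e.getCause() instanceof IOException) {
        throw (IOException) e.getCause();
      }
      throw new IOException(e.getCause());
    }
  }
}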
2024-11-17T22:50:30,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35999 {}] regionserver.HRegion(8855): Flush requested on 2071f94e963317acec39cea472aff95f 2024-11-17T22:50:30,683 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2071f94e963317acec39cea472aff95f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-17T22:50:30,690 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/9ecf09abef9a49cabd42631a97b4141f is 1080, key is row0129/info:/1731883830658/Put/seqid=0 2024-11-17T22:50:30,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741862_1038 (size=12516) 2024-11-17T22:50:30,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741862_1038 (size=12516) 2024-11-17T22:50:30,697 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=183 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/9ecf09abef9a49cabd42631a97b4141f 2024-11-17T22:50:30,704 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/9ecf09abef9a49cabd42631a97b4141f as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/9ecf09abef9a49cabd42631a97b4141f 2024-11-17T22:50:30,710 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/9ecf09abef9a49cabd42631a97b4141f, entries=7, sequenceid=183, filesize=12.2 K 2024-11-17T22:50:30,711 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for 2071f94e963317acec39cea472aff95f in 28ms, sequenceid=183, compaction requested=true 2024-11-17T22:50:30,711 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2071f94e963317acec39cea472aff95f: 2024-11-17T22:50:30,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2071f94e963317acec39cea472aff95f:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T22:50:30,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T22:50:30,711 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T22:50:30,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35999 {}] regionserver.HRegion(8855): Flush requested on 2071f94e963317acec39cea472aff95f 2024-11-17T22:50:30,712 DEBUG 
[RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 95136 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T22:50:30,712 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2071f94e963317acec39cea472aff95f 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-17T22:50:30,712 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HStore(1541): 2071f94e963317acec39cea472aff95f/info is initiating minor compaction (all files) 2024-11-17T22:50:30,712 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 2071f94e963317acec39cea472aff95f/info in TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f. 2024-11-17T22:50:30,713 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/6a3c59d0b6c344c2a0d8ea2a398436be, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/67852e8db3e04b9a99ef00275c917bae, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/9ecf09abef9a49cabd42631a97b4141f] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp, totalSize=92.9 K 2024-11-17T22:50:30,713 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6a3c59d0b6c344c2a0d8ea2a398436be, keycount=55, bloomtype=ROW, size=63.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1731883814417 2024-11-17T22:50:30,714 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.Compactor(225): Compacting 67852e8db3e04b9a99ef00275c917bae, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1731883828633 2024-11-17T22:50:30,714 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9ecf09abef9a49cabd42631a97b4141f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1731883830658 2024-11-17T22:50:30,716 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/dacdb9051cfb40c897058b7f1c6908a7 is 1080, key is row0136/info:/1731883830684/Put/seqid=0 2024-11-17T22:50:30,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741863_1039 (size=19000) 2024-11-17T22:50:30,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741863_1039 (size=19000) 2024-11-17T22:50:30,728 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=199 (bloomFilter=true), 
to=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/dacdb9051cfb40c897058b7f1c6908a7 2024-11-17T22:50:30,730 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2071f94e963317acec39cea472aff95f#info#compaction#77 average throughput is 25.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T22:50:30,730 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/453b100f66d8409cbdd7e299844b56a7 is 1080, key is row0062/info:/1731883814417/Put/seqid=0 2024-11-17T22:50:30,735 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/dacdb9051cfb40c897058b7f1c6908a7 as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/dacdb9051cfb40c897058b7f1c6908a7 2024-11-17T22:50:30,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741864_1040 (size=85371) 2024-11-17T22:50:30,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741864_1040 (size=85371) 2024-11-17T22:50:30,740 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/dacdb9051cfb40c897058b7f1c6908a7, entries=13, sequenceid=199, filesize=18.6 K 2024-11-17T22:50:30,741 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/453b100f66d8409cbdd7e299844b56a7 as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/453b100f66d8409cbdd7e299844b56a7 2024-11-17T22:50:30,742 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=12.61 KB/12912 for 2071f94e963317acec39cea472aff95f in 29ms, sequenceid=199, compaction requested=false 2024-11-17T22:50:30,742 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2071f94e963317acec39cea472aff95f: 2024-11-17T22:50:30,748 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 2071f94e963317acec39cea472aff95f/info of 2071f94e963317acec39cea472aff95f into 453b100f66d8409cbdd7e299844b56a7(size=83.4 K), total size for store is 101.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
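For context on the flush and compaction records around this point: the MemStoreFlusher writes the memstore of region 2071f94e963317acec39cea472aff95f out to a new HFile under .tmp, commits it into the info store, and the CompactSplit short-compactions thread then rewrites the accumulated store files into a single larger one. In this test those steps are triggered internally by the region server; the sketch below is only an illustration of requesting the equivalent operations through the public HBase Admin API, assuming an hbase-site.xml on the classpath that points at a reachable cluster, and it is not what the test itself does.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactExample {
  public static void main(String[] args) throws Exception {
    // Assumes hbase-site.xml on the classpath points at a running cluster.
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Write memstore contents out as HFiles ("Flushing ... 1/1 column families").
      admin.flush(table);
      // Ask the region servers to compact the store files just written
      // ("Starting compaction of [...] into tmpdir=...").
      admin.compact(table);
    }
  }
}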
2024-11-17T22:50:30,748 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 2071f94e963317acec39cea472aff95f: 2024-11-17T22:50:30,748 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f., storeName=2071f94e963317acec39cea472aff95f/info, priority=13, startTime=1731883830711; duration=0sec 2024-11-17T22:50:30,748 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T22:50:30,748 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2071f94e963317acec39cea472aff95f:info 2024-11-17T22:50:31,156 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:50:31,156 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:31,569 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:32,157 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:50:32,157 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:32,570 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:32,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35999 {}] regionserver.HRegion(8855): Flush requested on 2071f94e963317acec39cea472aff95f 2024-11-17T22:50:32,743 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2071f94e963317acec39cea472aff95f 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-17T22:50:32,751 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/dae5c868c75f4de69b8e24bad968859a is 1080, key is row0149/info:/1731883830713/Put/seqid=0 2024-11-17T22:50:32,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741865_1041 (size=19000) 2024-11-17T22:50:32,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741865_1041 (size=19000) 2024-11-17T22:50:32,757 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=216 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/dae5c868c75f4de69b8e24bad968859a 2024-11-17T22:50:32,765 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/dae5c868c75f4de69b8e24bad968859a as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/dae5c868c75f4de69b8e24bad968859a 2024-11-17T22:50:32,770 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/dae5c868c75f4de69b8e24bad968859a, entries=13, sequenceid=216, filesize=18.6 K 2024-11-17T22:50:32,771 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, 
currentSize=10.51 KB/10760 for 2071f94e963317acec39cea472aff95f in 28ms, sequenceid=216, compaction requested=true 2024-11-17T22:50:32,771 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2071f94e963317acec39cea472aff95f: 2024-11-17T22:50:32,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2071f94e963317acec39cea472aff95f:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T22:50:32,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T22:50:32,771 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T22:50:32,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35999 {}] regionserver.HRegion(8855): Flush requested on 2071f94e963317acec39cea472aff95f 2024-11-17T22:50:32,771 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2071f94e963317acec39cea472aff95f 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-17T22:50:32,772 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 123371 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T22:50:32,772 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HStore(1541): 2071f94e963317acec39cea472aff95f/info is initiating minor compaction (all files) 2024-11-17T22:50:32,772 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 2071f94e963317acec39cea472aff95f/info in TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f. 
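The SortedCompactionPolicy / ExploringCompactionPolicy lines above record how store files are picked for these minor compactions: candidate selections are enumerated, and a selection counts as "in ratio" when no single file is larger than the compaction ratio times the combined size of the other files in it (hbase.hstore.compaction.ratio, commonly 1.2, with a larger off-peak ratio). The snippet below is a deliberately simplified version of that check using made-up sizes; it is not the HBase code, which additionally enforces minimum and maximum file counts and total-size limits and walks many permutations.

import java.util.List;

public class FilesInRatioCheck {

  // Simplified form of the "in ratio" test referenced by the
  // ExploringCompactionPolicy log line: every file in the proposed selection must
  // be no larger than ratio * (combined size of the other files in the selection).
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    if (fileSizes.size() < 2) {
      return true;
    }
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Hypothetical store-file sizes in bytes, not the ones from this log.
    System.out.println(filesInRatio(List.of(12_000L, 11_000L, 10_000L), 1.2));  // true
    System.out.println(filesInRatio(List.of(100_000L, 11_000L, 10_000L), 1.2)); // false
  }
}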
2024-11-17T22:50:32,772 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/453b100f66d8409cbdd7e299844b56a7, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/dacdb9051cfb40c897058b7f1c6908a7, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/dae5c868c75f4de69b8e24bad968859a] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp, totalSize=120.5 K 2024-11-17T22:50:32,773 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.Compactor(225): Compacting 453b100f66d8409cbdd7e299844b56a7, keycount=74, bloomtype=ROW, size=83.4 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1731883814417 2024-11-17T22:50:32,773 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.Compactor(225): Compacting dacdb9051cfb40c897058b7f1c6908a7, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1731883830684 2024-11-17T22:50:32,774 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.Compactor(225): Compacting dae5c868c75f4de69b8e24bad968859a, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1731883830713 2024-11-17T22:50:32,775 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/8b9a924adc814a67b9913bf667d8f062 is 1080, key is row0162/info:/1731883832746/Put/seqid=0 2024-11-17T22:50:32,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741866_1042 (size=16828) 2024-11-17T22:50:32,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741866_1042 (size=16828) 2024-11-17T22:50:32,781 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/8b9a924adc814a67b9913bf667d8f062 2024-11-17T22:50:32,787 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/8b9a924adc814a67b9913bf667d8f062 as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/8b9a924adc814a67b9913bf667d8f062 2024-11-17T22:50:32,791 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2071f94e963317acec39cea472aff95f#info#compaction#80 average throughput is 51.31 MB/second, slept 0 time(s) and total slept 
time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T22:50:32,792 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/5e6d61ce027f4001b5b6c7dd28f52c11 is 1080, key is row0062/info:/1731883814417/Put/seqid=0 2024-11-17T22:50:32,793 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/8b9a924adc814a67b9913bf667d8f062, entries=11, sequenceid=230, filesize=16.4 K 2024-11-17T22:50:32,794 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=9.46 KB/9684 for 2071f94e963317acec39cea472aff95f in 22ms, sequenceid=230, compaction requested=false 2024-11-17T22:50:32,794 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2071f94e963317acec39cea472aff95f: 2024-11-17T22:50:32,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35999 {}] regionserver.HRegion(8855): Flush requested on 2071f94e963317acec39cea472aff95f 2024-11-17T22:50:32,794 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2071f94e963317acec39cea472aff95f 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-17T22:50:32,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741867_1043 (size=113509) 2024-11-17T22:50:32,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741867_1043 (size=113509) 2024-11-17T22:50:32,799 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/c275f4f7aa1c4ac3bbea51cd32f94812 is 1080, key is row0173/info:/1731883832773/Put/seqid=0 2024-11-17T22:50:32,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741868_1044 (size=15750) 2024-11-17T22:50:32,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741868_1044 (size=15750) 2024-11-17T22:50:32,804 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=243 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/c275f4f7aa1c4ac3bbea51cd32f94812 2024-11-17T22:50:32,805 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/5e6d61ce027f4001b5b6c7dd28f52c11 as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/5e6d61ce027f4001b5b6c7dd28f52c11 
2024-11-17T22:50:32,809 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/c275f4f7aa1c4ac3bbea51cd32f94812 as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/c275f4f7aa1c4ac3bbea51cd32f94812 2024-11-17T22:50:32,810 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 2071f94e963317acec39cea472aff95f/info of 2071f94e963317acec39cea472aff95f into 5e6d61ce027f4001b5b6c7dd28f52c11(size=110.8 K), total size for store is 127.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T22:50:32,810 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 2071f94e963317acec39cea472aff95f: 2024-11-17T22:50:32,810 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f., storeName=2071f94e963317acec39cea472aff95f/info, priority=13, startTime=1731883832771; duration=0sec 2024-11-17T22:50:32,810 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T22:50:32,810 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2071f94e963317acec39cea472aff95f:info 2024-11-17T22:50:32,814 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/c275f4f7aa1c4ac3bbea51cd32f94812, entries=10, sequenceid=243, filesize=15.4 K 2024-11-17T22:50:32,815 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=10.51 KB/10760 for 2071f94e963317acec39cea472aff95f in 21ms, sequenceid=243, compaction requested=true 2024-11-17T22:50:32,816 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2071f94e963317acec39cea472aff95f: 2024-11-17T22:50:32,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2071f94e963317acec39cea472aff95f:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T22:50:32,816 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T22:50:32,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T22:50:32,817 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 146087 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T22:50:32,817 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HStore(1541): 
2071f94e963317acec39cea472aff95f/info is initiating minor compaction (all files) 2024-11-17T22:50:32,817 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 2071f94e963317acec39cea472aff95f/info in TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f. 2024-11-17T22:50:32,817 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/5e6d61ce027f4001b5b6c7dd28f52c11, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/8b9a924adc814a67b9913bf667d8f062, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/c275f4f7aa1c4ac3bbea51cd32f94812] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp, totalSize=142.7 K 2024-11-17T22:50:32,817 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5e6d61ce027f4001b5b6c7dd28f52c11, keycount=100, bloomtype=ROW, size=110.8 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1731883814417 2024-11-17T22:50:32,818 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8b9a924adc814a67b9913bf667d8f062, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1731883832746 2024-11-17T22:50:32,818 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.Compactor(225): Compacting c275f4f7aa1c4ac3bbea51cd32f94812, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1731883832773 2024-11-17T22:50:32,828 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2071f94e963317acec39cea472aff95f#info#compaction#82 average throughput is 62.08 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T22:50:32,829 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/9fa7df2936eb4fa89a61d202b1559f0a is 1080, key is row0062/info:/1731883814417/Put/seqid=0 2024-11-17T22:50:32,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741869_1045 (size=136381) 2024-11-17T22:50:32,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741869_1045 (size=136381) 2024-11-17T22:50:32,838 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/9fa7df2936eb4fa89a61d202b1559f0a as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/9fa7df2936eb4fa89a61d202b1559f0a 2024-11-17T22:50:32,845 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 2071f94e963317acec39cea472aff95f/info of 2071f94e963317acec39cea472aff95f into 9fa7df2936eb4fa89a61d202b1559f0a(size=133.2 K), total size for store is 133.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T22:50:32,845 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 2071f94e963317acec39cea472aff95f: 2024-11-17T22:50:32,845 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f., storeName=2071f94e963317acec39cea472aff95f/info, priority=13, startTime=1731883832816; duration=0sec 2024-11-17T22:50:32,845 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T22:50:32,845 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2071f94e963317acec39cea472aff95f:info 2024-11-17T22:50:33,157 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:33,157 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:33,571 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:34,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:34,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:34,572 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:50:34,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35999 {}] regionserver.HRegion(8855): Flush requested on 2071f94e963317acec39cea472aff95f 2024-11-17T22:50:34,819 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2071f94e963317acec39cea472aff95f 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-17T22:50:34,826 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/aa06e2e9ba234ebf9e8c3aec4b6f4095 is 1080, key is row0183/info:/1731883832795/Put/seqid=0 2024-11-17T22:50:34,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741870_1046 (size=16829) 2024-11-17T22:50:34,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741870_1046 (size=16829) 2024-11-17T22:50:34,835 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/aa06e2e9ba234ebf9e8c3aec4b6f4095 2024-11-17T22:50:34,842 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/aa06e2e9ba234ebf9e8c3aec4b6f4095 as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/aa06e2e9ba234ebf9e8c3aec4b6f4095 2024-11-17T22:50:34,847 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/aa06e2e9ba234ebf9e8c3aec4b6f4095, entries=11, sequenceid=259, filesize=16.4 K 2024-11-17T22:50:34,848 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=12.61 KB/12912 for 2071f94e963317acec39cea472aff95f in 29ms, sequenceid=259, compaction requested=false 2024-11-17T22:50:34,848 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2071f94e963317acec39cea472aff95f: 2024-11-17T22:50:34,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35999 {}] regionserver.HRegion(8855): Flush requested on 2071f94e963317acec39cea472aff95f 2024-11-17T22:50:34,850 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2071f94e963317acec39cea472aff95f 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-17T22:50:34,854 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/effa810d11e946a18a1fe36ea9e8a112 is 1080, key is row0194/info:/1731883834821/Put/seqid=0 2024-11-17T22:50:34,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is 
added to blk_1073741871_1047 (size=19013) 2024-11-17T22:50:34,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741871_1047 (size=19013) 2024-11-17T22:50:34,859 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/effa810d11e946a18a1fe36ea9e8a112 2024-11-17T22:50:34,866 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/effa810d11e946a18a1fe36ea9e8a112 as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/effa810d11e946a18a1fe36ea9e8a112 2024-11-17T22:50:34,871 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/effa810d11e946a18a1fe36ea9e8a112, entries=13, sequenceid=275, filesize=18.6 K 2024-11-17T22:50:34,872 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=11.56 KB/11836 for 2071f94e963317acec39cea472aff95f in 22ms, sequenceid=275, compaction requested=true 2024-11-17T22:50:34,872 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2071f94e963317acec39cea472aff95f: 2024-11-17T22:50:34,872 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2071f94e963317acec39cea472aff95f:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T22:50:34,872 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T22:50:34,872 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T22:50:34,874 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 172223 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T22:50:34,874 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HStore(1541): 2071f94e963317acec39cea472aff95f/info is initiating minor compaction (all files) 2024-11-17T22:50:34,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35999 {}] regionserver.HRegion(8855): Flush requested on 2071f94e963317acec39cea472aff95f 2024-11-17T22:50:34,874 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 2071f94e963317acec39cea472aff95f/info in TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f. 
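
The flush records above show MemStoreFlusher writing the memstore of region 2071f94e963317acec39cea472aff95f to a temporary HFile under .tmp and then committing it into the info store. For reference, the same flush can also be requested from a client; the sketch below does so through the HBase Admin API. It is a minimal illustration only: it assumes an hbase-site.xml for this test cluster is on the classpath and reuses the table name TestLogRolling-testLogRolling from the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushExample {
  public static void main(String[] args) throws Exception {
    // Assumes an hbase-site.xml on the classpath pointing at the test cluster.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Table name taken from the log above; any other table works the same way.
      TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
      // Ask every region of the table to flush its memstore to new HFiles,
      // the same operation the region server performs when the flush size is reached.
      admin.flush(table);
    }
  }
}
```

Run against a cluster like the one in this test, such a request would be expected to show up as the same "Flush requested" and "Finished flush" pairs seen above.
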
2024-11-17T22:50:34,874 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2071f94e963317acec39cea472aff95f 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-17T22:50:34,874 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/9fa7df2936eb4fa89a61d202b1559f0a, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/aa06e2e9ba234ebf9e8c3aec4b6f4095, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/effa810d11e946a18a1fe36ea9e8a112] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp, totalSize=168.2 K 2024-11-17T22:50:34,875 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9fa7df2936eb4fa89a61d202b1559f0a, keycount=121, bloomtype=ROW, size=133.2 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1731883814417 2024-11-17T22:50:34,875 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.Compactor(225): Compacting aa06e2e9ba234ebf9e8c3aec4b6f4095, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1731883832795 2024-11-17T22:50:34,875 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.Compactor(225): Compacting effa810d11e946a18a1fe36ea9e8a112, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1731883834821 2024-11-17T22:50:34,879 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/9342657d51b54ad2822644bd5429ccf3 is 1080, key is row0207/info:/1731883834851/Put/seqid=0 2024-11-17T22:50:34,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741872_1048 (size=17918) 2024-11-17T22:50:34,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741872_1048 (size=17918) 2024-11-17T22:50:34,887 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/9342657d51b54ad2822644bd5429ccf3 2024-11-17T22:50:34,892 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/9342657d51b54ad2822644bd5429ccf3 as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/9342657d51b54ad2822644bd5429ccf3 2024-11-17T22:50:34,897 INFO 
[RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2071f94e963317acec39cea472aff95f#info#compaction#86 average throughput is 49.60 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T22:50:34,897 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/9342657d51b54ad2822644bd5429ccf3, entries=12, sequenceid=290, filesize=17.5 K 2024-11-17T22:50:34,898 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/add8902209e34c42b5ae78a46a857784 is 1080, key is row0062/info:/1731883814417/Put/seqid=0 2024-11-17T22:50:34,898 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=6.30 KB/6456 for 2071f94e963317acec39cea472aff95f in 24ms, sequenceid=290, compaction requested=false 2024-11-17T22:50:34,899 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2071f94e963317acec39cea472aff95f: 2024-11-17T22:50:34,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741873_1049 (size=162446) 2024-11-17T22:50:34,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741873_1049 (size=162446) 2024-11-17T22:50:34,907 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/add8902209e34c42b5ae78a46a857784 as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/add8902209e34c42b5ae78a46a857784 2024-11-17T22:50:34,912 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 2071f94e963317acec39cea472aff95f/info of 2071f94e963317acec39cea472aff95f into add8902209e34c42b5ae78a46a857784(size=158.6 K), total size for store is 176.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
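
Right before each minor compaction above, ExploringCompactionPolicy logs that it "selected 3 files ... after considering 1 permutations with 1 in ratio". The snippet below is a deliberately simplified, self-contained sketch of the kind of size-ratio test such policies apply (a candidate file should not dwarf the combined size of the files it would be compacted with). It illustrates the idea only, it is not HBase's actual selection code, and the byte counts are rough conversions of the 133.2 K, 16.4 K and 18.6 K figures in the log.

```java
import java.util.List;

final class RatioCheck {
  /**
   * Simplified illustration of a size-ratio test: every file in the candidate
   * list must be no larger than ratio * (sum of the other candidates).
   * This mirrors the spirit, not the letter, of HBase's exploring policy.
   */
  static boolean withinRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Approximate byte sizes of the three files compacted above: ~133.2 K, 16.4 K, 18.6 K.
    List<Long> candidate = List.of(136397L, 16829L, 19013L);
    // With a ratio of 1.2 the largest file fails the check (136397 > 1.2 * 35842),
    // which is why ratio tests tend to exclude a single dominant file.
    System.out.println(withinRatio(candidate, 1.2));
  }
}
```
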
2024-11-17T22:50:34,912 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 2071f94e963317acec39cea472aff95f: 2024-11-17T22:50:34,912 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f., storeName=2071f94e963317acec39cea472aff95f/info, priority=13, startTime=1731883834872; duration=0sec 2024-11-17T22:50:34,912 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T22:50:34,912 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2071f94e963317acec39cea472aff95f:info 2024-11-17T22:50:35,159 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:50:35,159 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:35,573 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:36,160 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:50:36,160 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:36,574 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:36,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35999 {}] regionserver.HRegion(8855): Flush requested on 2071f94e963317acec39cea472aff95f 2024-11-17T22:50:36,891 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2071f94e963317acec39cea472aff95f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-17T22:50:36,896 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/4970d8b545074bda863a1950b699aab2 is 1080, key is row0219/info:/1731883834875/Put/seqid=0 2024-11-17T22:50:36,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741874_1050 (size=12523) 2024-11-17T22:50:36,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741874_1050 (size=12523) 2024-11-17T22:50:36,903 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/4970d8b545074bda863a1950b699aab2 2024-11-17T22:50:36,910 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/4970d8b545074bda863a1950b699aab2 as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/4970d8b545074bda863a1950b699aab2 2024-11-17T22:50:36,921 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/4970d8b545074bda863a1950b699aab2, entries=7, sequenceid=301, filesize=12.2 K 2024-11-17T22:50:36,922 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 
KB/12912 for 2071f94e963317acec39cea472aff95f in 31ms, sequenceid=301, compaction requested=true 2024-11-17T22:50:36,922 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2071f94e963317acec39cea472aff95f: 2024-11-17T22:50:36,923 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2071f94e963317acec39cea472aff95f:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T22:50:36,923 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T22:50:36,923 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T22:50:36,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35999 {}] regionserver.HRegion(8855): Flush requested on 2071f94e963317acec39cea472aff95f 2024-11-17T22:50:36,923 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2071f94e963317acec39cea472aff95f 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-17T22:50:36,925 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 192887 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T22:50:36,925 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HStore(1541): 2071f94e963317acec39cea472aff95f/info is initiating minor compaction (all files) 2024-11-17T22:50:36,925 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 2071f94e963317acec39cea472aff95f/info in TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f. 
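
The recurring WARN traces earlier in this section ("Failed invocation ... Caused by: java.io.IOException: Filesystem closed") come from RecoverLeaseFSUtils calling DistributedFileSystem.isFileClosed through reflection while the Close-WAL-Writer thread keeps retrying lease recovery after the test's DFSClient has already been shut down. The sketch below shows that general reflective-probe pattern as an illustration, with hypothetical error handling of my own; it is not the HBase utility itself.

```java
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class IsFileClosedProbe {
  /**
   * Reflectively call isFileClosed(Path) if the FileSystem implementation has it
   * (DistributedFileSystem does). Returns false when the method is missing or the
   * call fails, e.g. with "java.io.IOException: Filesystem closed" after the
   * DFSClient has been shut down, which is what the WARN lines above show.
   */
  static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException e) {
      return false; // this filesystem has no lease/close concept
    } catch (ReflectiveOperationException e) {
      // An InvocationTargetException here wraps the underlying IOException,
      // exactly as in the nested "Caused by" trace in the log.
      return false;
    }
  }
}
```

With a closed client, the reflective call surfaces as an InvocationTargetException wrapping the "Filesystem closed" IOException, which matches the nested trace in the log.
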
2024-11-17T22:50:36,925 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/add8902209e34c42b5ae78a46a857784, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/9342657d51b54ad2822644bd5429ccf3, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/4970d8b545074bda863a1950b699aab2] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp, totalSize=188.4 K 2024-11-17T22:50:36,925 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.Compactor(225): Compacting add8902209e34c42b5ae78a46a857784, keycount=145, bloomtype=ROW, size=158.6 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1731883814417 2024-11-17T22:50:36,926 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9342657d51b54ad2822644bd5429ccf3, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1731883834851 2024-11-17T22:50:36,926 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4970d8b545074bda863a1950b699aab2, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1731883834875 2024-11-17T22:50:36,928 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/45a248005b42499090b76cf009ea3f1c is 1080, key is row0226/info:/1731883836892/Put/seqid=0 2024-11-17T22:50:36,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741875_1051 (size=19013) 2024-11-17T22:50:36,942 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/45a248005b42499090b76cf009ea3f1c 2024-11-17T22:50:36,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741875_1051 (size=19013) 2024-11-17T22:50:36,952 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/45a248005b42499090b76cf009ea3f1c as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/45a248005b42499090b76cf009ea3f1c 2024-11-17T22:50:36,952 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2071f94e963317acec39cea472aff95f#info#compaction#89 average throughput is 42.07 MB/second, slept 0 time(s) and total slept 
time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T22:50:36,953 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/fbb387d87e224edd8e973ade9480d4c4 is 1080, key is row0062/info:/1731883814417/Put/seqid=0 2024-11-17T22:50:36,962 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/45a248005b42499090b76cf009ea3f1c, entries=13, sequenceid=317, filesize=18.6 K 2024-11-17T22:50:36,963 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=14.71 KB/15064 for 2071f94e963317acec39cea472aff95f in 40ms, sequenceid=317, compaction requested=false 2024-11-17T22:50:36,963 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2071f94e963317acec39cea472aff95f: 2024-11-17T22:50:36,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35999 {}] regionserver.HRegion(8855): Flush requested on 2071f94e963317acec39cea472aff95f 2024-11-17T22:50:36,966 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2071f94e963317acec39cea472aff95f 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-17T22:50:36,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741876_1052 (size=183053) 2024-11-17T22:50:36,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741876_1052 (size=183053) 2024-11-17T22:50:36,972 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/69423c615fca4afc8ca7551fdcaf4d8d is 1080, key is row0239/info:/1731883836924/Put/seqid=0 2024-11-17T22:50:36,976 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/fbb387d87e224edd8e973ade9480d4c4 as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/fbb387d87e224edd8e973ade9480d4c4 2024-11-17T22:50:36,982 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 2071f94e963317acec39cea472aff95f/info of 2071f94e963317acec39cea472aff95f into fbb387d87e224edd8e973ade9480d4c4(size=178.8 K), total size for store is 197.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
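
Each flush and compaction above is followed by two addStoredBlock lines, one per datanode (127.0.0.1:41973 and 127.0.0.1:34851), so the mini-cluster keeps every HFile block in two replicas. A client-side check of that is sketched below: it reads the replication factor of one of the compacted HFiles. The NameNode address and file path are copied from the log, and the file may of course already have been replaced by a later compaction by the time such a check runs.

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReplicationCheck {
  public static void main(String[] args) throws Exception {
    // NameNode address and HFile path taken from the store paths logged above.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:46027"), conf);
    Path hfile = new Path("/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/"
        + "data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/"
        + "info/fbb387d87e224edd8e973ade9480d4c4");
    FileStatus status = fs.getFileStatus(hfile);
    // Two "addStoredBlock" lines per block in the log correspond to a replication factor of 2.
    System.out.println("replication=" + status.getReplication() + " length=" + status.getLen());
    fs.close();
  }
}
```
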
2024-11-17T22:50:36,982 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 2071f94e963317acec39cea472aff95f: 2024-11-17T22:50:36,983 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f., storeName=2071f94e963317acec39cea472aff95f/info, priority=13, startTime=1731883836923; duration=0sec 2024-11-17T22:50:36,983 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T22:50:36,983 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2071f94e963317acec39cea472aff95f:info 2024-11-17T22:50:36,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741877_1053 (size=22254) 2024-11-17T22:50:36,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741877_1053 (size=22254) 2024-11-17T22:50:36,991 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/69423c615fca4afc8ca7551fdcaf4d8d 2024-11-17T22:50:36,997 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/69423c615fca4afc8ca7551fdcaf4d8d as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/69423c615fca4afc8ca7551fdcaf4d8d 2024-11-17T22:50:37,003 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/69423c615fca4afc8ca7551fdcaf4d8d, entries=16, sequenceid=336, filesize=21.7 K 2024-11-17T22:50:37,004 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=2.10 KB/2152 for 2071f94e963317acec39cea472aff95f in 39ms, sequenceid=336, compaction requested=true 2024-11-17T22:50:37,004 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2071f94e963317acec39cea472aff95f: 2024-11-17T22:50:37,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2071f94e963317acec39cea472aff95f:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T22:50:37,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T22:50:37,004 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T22:50:37,006 DEBUG 
[RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 224320 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T22:50:37,006 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HStore(1541): 2071f94e963317acec39cea472aff95f/info is initiating minor compaction (all files) 2024-11-17T22:50:37,006 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 2071f94e963317acec39cea472aff95f/info in TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f. 2024-11-17T22:50:37,006 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/fbb387d87e224edd8e973ade9480d4c4, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/45a248005b42499090b76cf009ea3f1c, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/69423c615fca4afc8ca7551fdcaf4d8d] into tmpdir=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp, totalSize=219.1 K 2024-11-17T22:50:37,006 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.Compactor(225): Compacting fbb387d87e224edd8e973ade9480d4c4, keycount=164, bloomtype=ROW, size=178.8 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1731883814417 2024-11-17T22:50:37,007 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.Compactor(225): Compacting 45a248005b42499090b76cf009ea3f1c, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1731883836892 2024-11-17T22:50:37,007 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] compactions.Compactor(225): Compacting 69423c615fca4afc8ca7551fdcaf4d8d, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1731883836924 2024-11-17T22:50:37,023 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2071f94e963317acec39cea472aff95f#info#compaction#91 average throughput is 49.51 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T22:50:37,023 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/bb85ab97714048de99d7d2dbe18a8f2b is 1080, key is row0062/info:/1731883814417/Put/seqid=0 2024-11-17T22:50:37,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741878_1054 (size=214523) 2024-11-17T22:50:37,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741878_1054 (size=214523) 2024-11-17T22:50:37,042 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/bb85ab97714048de99d7d2dbe18a8f2b as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/bb85ab97714048de99d7d2dbe18a8f2b 2024-11-17T22:50:37,048 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 2071f94e963317acec39cea472aff95f/info of 2071f94e963317acec39cea472aff95f into bb85ab97714048de99d7d2dbe18a8f2b(size=209.5 K), total size for store is 209.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T22:50:37,048 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 2071f94e963317acec39cea472aff95f: 2024-11-17T22:50:37,048 INFO [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f., storeName=2071f94e963317acec39cea472aff95f/info, priority=13, startTime=1731883837004; duration=0sec 2024-11-17T22:50:37,048 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T22:50:37,048 DEBUG [RS:0;1a6e40b21a48:35999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2071f94e963317acec39cea472aff95f:info 2024-11-17T22:50:37,160 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:37,160 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:37,575 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:38,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:38,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:38,576 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:50:38,973 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-17T22:50:38,974 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C35999%2C1731883800706.1731883838974 2024-11-17T22:50:38,985 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:38,985 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:38,985 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:38,985 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:38,986 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:38,986 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/WALs/1a6e40b21a48,35999,1731883800706/1a6e40b21a48%2C35999%2C1731883800706.1731883801537 with entries=318, filesize=310.38 KB; new WAL /user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/WALs/1a6e40b21a48,35999,1731883800706/1a6e40b21a48%2C35999%2C1731883800706.1731883838974 2024-11-17T22:50:38,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741833_1009 (size=317837) 2024-11-17T22:50:38,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741833_1009 (size=317837) 2024-11-17T22:50:38,989 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37525:37525),(127.0.0.1/127.0.0.1:43219:43219)] 2024-11-17T22:50:38,994 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for a2911dcc4e336c4898f6e0e65ebebe40: 2024-11-17T22:50:38,994 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 2071f94e963317acec39cea472aff95f 1/1 column families, dataSize=2.10 KB heapSize=2.50 KB 2024-11-17T22:50:38,998 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/d54fe30eadcc49729f4d26f5a7a45e69 is 1080, key is row0255/info:/1731883836967/Put/seqid=0 2024-11-17T22:50:39,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741880_1056 (size=7116) 2024-11-17T22:50:39,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741880_1056 (size=7116) 2024-11-17T22:50:39,003 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.10 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/d54fe30eadcc49729f4d26f5a7a45e69 2024-11-17T22:50:39,008 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/.tmp/info/d54fe30eadcc49729f4d26f5a7a45e69 as 
hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/d54fe30eadcc49729f4d26f5a7a45e69 2024-11-17T22:50:39,013 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/d54fe30eadcc49729f4d26f5a7a45e69, entries=2, sequenceid=343, filesize=6.9 K 2024-11-17T22:50:39,014 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~2.10 KB/2152, heapSize ~2.48 KB/2544, currentSize=0 B/0 for 2071f94e963317acec39cea472aff95f in 20ms, sequenceid=343, compaction requested=false 2024-11-17T22:50:39,014 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 2071f94e963317acec39cea472aff95f: 2024-11-17T22:50:39,014 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-17T22:50:39,018 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/hbase/meta/1588230740/.tmp/info/cf2f1a164e1f4ac982beecec7511edab is 193, key is TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f./info:regioninfo/1731883817452/Put/seqid=0 2024-11-17T22:50:39,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741881_1057 (size=6223) 2024-11-17T22:50:39,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741881_1057 (size=6223) 2024-11-17T22:50:39,022 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/hbase/meta/1588230740/.tmp/info/cf2f1a164e1f4ac982beecec7511edab 2024-11-17T22:50:39,027 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/hbase/meta/1588230740/.tmp/info/cf2f1a164e1f4ac982beecec7511edab as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/hbase/meta/1588230740/info/cf2f1a164e1f4ac982beecec7511edab 2024-11-17T22:50:39,031 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/hbase/meta/1588230740/info/cf2f1a164e1f4ac982beecec7511edab, entries=5, sequenceid=21, filesize=6.1 K 2024-11-17T22:50:39,032 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 18ms, sequenceid=21, compaction requested=false 2024-11-17T22:50:39,032 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-17T22:50:39,032 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C35999%2C1731883800706.1731883839032 2024-11-17T22:50:39,036 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:39,036 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:39,036 INFO 
[sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:39,036 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:39,036 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:39,037 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/WALs/1a6e40b21a48,35999,1731883800706/1a6e40b21a48%2C35999%2C1731883800706.1731883838974 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/WALs/1a6e40b21a48,35999,1731883800706/1a6e40b21a48%2C35999%2C1731883800706.1731883839032 2024-11-17T22:50:39,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741879_1055 (size=731) 2024-11-17T22:50:39,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741879_1055 (size=731) 2024-11-17T22:50:39,039 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/WALs/1a6e40b21a48,35999,1731883800706/1a6e40b21a48%2C35999%2C1731883800706.1731883801537 to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/oldWALs/1a6e40b21a48%2C35999%2C1731883800706.1731883801537 2024-11-17T22:50:39,039 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43219:43219),(127.0.0.1/127.0.0.1:37525:37525)] 2024-11-17T22:50:39,040 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-17T22:50:39,040 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-17T22:50:39,040 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-17T22:50:39,040 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T22:50:39,040 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/WALs/1a6e40b21a48,35999,1731883800706/1a6e40b21a48%2C35999%2C1731883800706.1731883838974 to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/oldWALs/1a6e40b21a48%2C35999%2C1731883800706.1731883838974 2024-11-17T22:50:39,040 DEBUG [Time-limited test {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:50:39,041 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:50:39,041 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-17T22:50:39,041 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1737534740, stopped=false 2024-11-17T22:50:39,041 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=1a6e40b21a48,46683,1731883800648 2024-11-17T22:50:39,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35999-0x1004fe0f4a40001, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T22:50:39,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35999-0x1004fe0f4a40001, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:50:39,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46683-0x1004fe0f4a40000, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T22:50:39,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46683-0x1004fe0f4a40000, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:50:39,042 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T22:50:39,043 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46683-0x1004fe0f4a40000, quorum=127.0.0.1:59694, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T22:50:39,043 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-17T22:50:39,043 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35999-0x1004fe0f4a40001, quorum=127.0.0.1:59694, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T22:50:39,043 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-17T22:50:39,043 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T22:50:39,043 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:50:39,044 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '1a6e40b21a48,35999,1731883800706' ***** 2024-11-17T22:50:39,044 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-17T22:50:39,044 INFO [RS:0;1a6e40b21a48:35999 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-17T22:50:39,044 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-17T22:50:39,044 INFO [RS:0;1a6e40b21a48:35999 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-17T22:50:39,044 INFO [RS:0;1a6e40b21a48:35999 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-17T22:50:39,044 INFO [RS:0;1a6e40b21a48:35999 {}] regionserver.HRegionServer(3091): Received CLOSE for a2911dcc4e336c4898f6e0e65ebebe40 2024-11-17T22:50:39,044 INFO [RS:0;1a6e40b21a48:35999 {}] regionserver.HRegionServer(3091): Received CLOSE for 2071f94e963317acec39cea472aff95f 2024-11-17T22:50:39,044 INFO [RS:0;1a6e40b21a48:35999 {}] regionserver.HRegionServer(959): stopping server 1a6e40b21a48,35999,1731883800706 2024-11-17T22:50:39,044 INFO [RS:0;1a6e40b21a48:35999 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T22:50:39,044 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing a2911dcc4e336c4898f6e0e65ebebe40, disabling compactions & flushes 2024-11-17T22:50:39,044 INFO [RS:0;1a6e40b21a48:35999 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;1a6e40b21a48:35999. 2024-11-17T22:50:39,044 INFO [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731883816520.a2911dcc4e336c4898f6e0e65ebebe40. 2024-11-17T22:50:39,044 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731883816520.a2911dcc4e336c4898f6e0e65ebebe40. 
2024-11-17T22:50:39,044 DEBUG [RS:0;1a6e40b21a48:35999 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T22:50:39,044 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731883816520.a2911dcc4e336c4898f6e0e65ebebe40. after waiting 0 ms 2024-11-17T22:50:39,044 DEBUG [RS:0;1a6e40b21a48:35999 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:50:39,044 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731883816520.a2911dcc4e336c4898f6e0e65ebebe40. 2024-11-17T22:50:39,044 INFO [RS:0;1a6e40b21a48:35999 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-17T22:50:39,045 INFO [RS:0;1a6e40b21a48:35999 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-17T22:50:39,045 INFO [RS:0;1a6e40b21a48:35999 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-17T22:50:39,045 INFO [RS:0;1a6e40b21a48:35999 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-17T22:50:39,045 INFO [RS:0;1a6e40b21a48:35999 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-17T22:50:39,045 DEBUG [RS:0;1a6e40b21a48:35999 {}] regionserver.HRegionServer(1325): Online Regions={a2911dcc4e336c4898f6e0e65ebebe40=TestLogRolling-testLogRolling,,1731883816520.a2911dcc4e336c4898f6e0e65ebebe40., 2071f94e963317acec39cea472aff95f=TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f., 1588230740=hbase:meta,,1.1588230740} 2024-11-17T22:50:39,045 DEBUG [RS:0;1a6e40b21a48:35999 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 2071f94e963317acec39cea472aff95f, a2911dcc4e336c4898f6e0e65ebebe40 2024-11-17T22:50:39,045 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T22:50:39,045 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T22:50:39,045 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T22:50:39,045 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T22:50:39,045 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T22:50:39,045 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731883816520.a2911dcc4e336c4898f6e0e65ebebe40.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/a2911dcc4e336c4898f6e0e65ebebe40/info/fc203db1c0f8408d802962aa6d629465.1f4b8723a07f669c388b5143f2a00156->hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/fc203db1c0f8408d802962aa6d629465-bottom] to archive 2024-11-17T22:50:39,046 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731883816520.a2911dcc4e336c4898f6e0e65ebebe40.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-17T22:50:39,047 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731883816520.a2911dcc4e336c4898f6e0e65ebebe40.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/a2911dcc4e336c4898f6e0e65ebebe40/info/fc203db1c0f8408d802962aa6d629465.1f4b8723a07f669c388b5143f2a00156 to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/a2911dcc4e336c4898f6e0e65ebebe40/info/fc203db1c0f8408d802962aa6d629465.1f4b8723a07f669c388b5143f2a00156 2024-11-17T22:50:39,048 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731883816520.a2911dcc4e336c4898f6e0e65ebebe40.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=1a6e40b21a48:46683 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-17T22:50:39,048 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731883816520.a2911dcc4e336c4898f6e0e65ebebe40.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-17T22:50:39,052 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-17T22:50:39,052 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/a2911dcc4e336c4898f6e0e65ebebe40/recovered.edits/135.seqid, newMaxSeqId=135, maxSeqId=130 2024-11-17T22:50:39,053 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T22:50:39,053 INFO [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731883816520.a2911dcc4e336c4898f6e0e65ebebe40. 2024-11-17T22:50:39,053 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T22:50:39,053 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for a2911dcc4e336c4898f6e0e65ebebe40: Waiting for close lock at 1731883839044Running coprocessor pre-close hooks at 1731883839044Disabling compacts and flushes for region at 1731883839044Disabling writes for close at 1731883839044Writing region close event to WAL at 1731883839049 (+5 ms)Running coprocessor post-close hooks at 1731883839053 (+4 ms)Closed at 1731883839053 2024-11-17T22:50:39,053 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731883839045Running coprocessor pre-close hooks at 1731883839045Disabling compacts and flushes for region at 1731883839045Disabling writes for close at 1731883839045Writing region close event to WAL at 1731883839050 (+5 ms)Running coprocessor post-close hooks at 1731883839053 (+3 ms)Closed at 1731883839053 2024-11-17T22:50:39,053 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-17T22:50:39,053 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731883816520.a2911dcc4e336c4898f6e0e65ebebe40. 
2024-11-17T22:50:39,053 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 2071f94e963317acec39cea472aff95f, disabling compactions & flushes 2024-11-17T22:50:39,053 INFO [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f. 2024-11-17T22:50:39,053 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f. 2024-11-17T22:50:39,053 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f. after waiting 0 ms 2024-11-17T22:50:39,053 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f. 2024-11-17T22:50:39,054 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/fc203db1c0f8408d802962aa6d629465.1f4b8723a07f669c388b5143f2a00156->hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/1f4b8723a07f669c388b5143f2a00156/info/fc203db1c0f8408d802962aa6d629465-top, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/TestLogRolling-testLogRolling=1f4b8723a07f669c388b5143f2a00156-e3d1fc1158424abcbdafe261f4cdafe9, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/34d9f4a1a6524f01a7f7665edc6a6ed3, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/TestLogRolling-testLogRolling=1f4b8723a07f669c388b5143f2a00156-1ae807f572514264873b574936688d7a, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/a5b3dacc87de4626b02559365c2b1c05, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/6a3c59d0b6c344c2a0d8ea2a398436be, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/54953a3ea39944c287cd7c73695bb247, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/67852e8db3e04b9a99ef00275c917bae, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/453b100f66d8409cbdd7e299844b56a7, 
hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/9ecf09abef9a49cabd42631a97b4141f, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/dacdb9051cfb40c897058b7f1c6908a7, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/5e6d61ce027f4001b5b6c7dd28f52c11, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/dae5c868c75f4de69b8e24bad968859a, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/8b9a924adc814a67b9913bf667d8f062, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/9fa7df2936eb4fa89a61d202b1559f0a, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/c275f4f7aa1c4ac3bbea51cd32f94812, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/aa06e2e9ba234ebf9e8c3aec4b6f4095, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/add8902209e34c42b5ae78a46a857784, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/effa810d11e946a18a1fe36ea9e8a112, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/9342657d51b54ad2822644bd5429ccf3, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/fbb387d87e224edd8e973ade9480d4c4, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/4970d8b545074bda863a1950b699aab2, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/45a248005b42499090b76cf009ea3f1c, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/69423c615fca4afc8ca7551fdcaf4d8d] to archive 2024-11-17T22:50:39,055 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-17T22:50:39,056 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/fc203db1c0f8408d802962aa6d629465.1f4b8723a07f669c388b5143f2a00156 to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/fc203db1c0f8408d802962aa6d629465.1f4b8723a07f669c388b5143f2a00156 2024-11-17T22:50:39,058 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/TestLogRolling-testLogRolling=1f4b8723a07f669c388b5143f2a00156-e3d1fc1158424abcbdafe261f4cdafe9 to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/TestLogRolling-testLogRolling=1f4b8723a07f669c388b5143f2a00156-e3d1fc1158424abcbdafe261f4cdafe9 2024-11-17T22:50:39,059 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/34d9f4a1a6524f01a7f7665edc6a6ed3 to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/34d9f4a1a6524f01a7f7665edc6a6ed3 2024-11-17T22:50:39,060 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/TestLogRolling-testLogRolling=1f4b8723a07f669c388b5143f2a00156-1ae807f572514264873b574936688d7a to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/TestLogRolling-testLogRolling=1f4b8723a07f669c388b5143f2a00156-1ae807f572514264873b574936688d7a 2024-11-17T22:50:39,061 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/a5b3dacc87de4626b02559365c2b1c05 to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/a5b3dacc87de4626b02559365c2b1c05 2024-11-17T22:50:39,062 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/6a3c59d0b6c344c2a0d8ea2a398436be to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/6a3c59d0b6c344c2a0d8ea2a398436be 2024-11-17T22:50:39,063 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/54953a3ea39944c287cd7c73695bb247 to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/54953a3ea39944c287cd7c73695bb247 2024-11-17T22:50:39,064 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/67852e8db3e04b9a99ef00275c917bae to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/67852e8db3e04b9a99ef00275c917bae 2024-11-17T22:50:39,065 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/453b100f66d8409cbdd7e299844b56a7 to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/453b100f66d8409cbdd7e299844b56a7 2024-11-17T22:50:39,067 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/9ecf09abef9a49cabd42631a97b4141f to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/9ecf09abef9a49cabd42631a97b4141f 2024-11-17T22:50:39,068 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/dacdb9051cfb40c897058b7f1c6908a7 to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/dacdb9051cfb40c897058b7f1c6908a7 2024-11-17T22:50:39,069 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/5e6d61ce027f4001b5b6c7dd28f52c11 to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/5e6d61ce027f4001b5b6c7dd28f52c11 2024-11-17T22:50:39,070 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/dae5c868c75f4de69b8e24bad968859a to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/dae5c868c75f4de69b8e24bad968859a 2024-11-17T22:50:39,072 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/8b9a924adc814a67b9913bf667d8f062 to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/8b9a924adc814a67b9913bf667d8f062 2024-11-17T22:50:39,073 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/9fa7df2936eb4fa89a61d202b1559f0a to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/9fa7df2936eb4fa89a61d202b1559f0a 2024-11-17T22:50:39,074 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/c275f4f7aa1c4ac3bbea51cd32f94812 to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/c275f4f7aa1c4ac3bbea51cd32f94812 2024-11-17T22:50:39,075 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/aa06e2e9ba234ebf9e8c3aec4b6f4095 to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/aa06e2e9ba234ebf9e8c3aec4b6f4095 2024-11-17T22:50:39,076 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/add8902209e34c42b5ae78a46a857784 to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/add8902209e34c42b5ae78a46a857784 2024-11-17T22:50:39,077 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/effa810d11e946a18a1fe36ea9e8a112 to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/effa810d11e946a18a1fe36ea9e8a112 2024-11-17T22:50:39,078 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/9342657d51b54ad2822644bd5429ccf3 to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/9342657d51b54ad2822644bd5429ccf3 2024-11-17T22:50:39,079 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/fbb387d87e224edd8e973ade9480d4c4 to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/fbb387d87e224edd8e973ade9480d4c4 2024-11-17T22:50:39,080 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/4970d8b545074bda863a1950b699aab2 to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/4970d8b545074bda863a1950b699aab2 2024-11-17T22:50:39,081 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/45a248005b42499090b76cf009ea3f1c to 
hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/45a248005b42499090b76cf009ea3f1c 2024-11-17T22:50:39,082 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/69423c615fca4afc8ca7551fdcaf4d8d to hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/archive/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/info/69423c615fca4afc8ca7551fdcaf4d8d 2024-11-17T22:50:39,082 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [34d9f4a1a6524f01a7f7665edc6a6ed3=42984, a5b3dacc87de4626b02559365c2b1c05=12516, 6a3c59d0b6c344c2a0d8ea2a398436be=64714, 54953a3ea39944c287cd7c73695bb247=19000, 67852e8db3e04b9a99ef00275c917bae=17906, 453b100f66d8409cbdd7e299844b56a7=85371, 9ecf09abef9a49cabd42631a97b4141f=12516, dacdb9051cfb40c897058b7f1c6908a7=19000, 5e6d61ce027f4001b5b6c7dd28f52c11=113509, dae5c868c75f4de69b8e24bad968859a=19000, 8b9a924adc814a67b9913bf667d8f062=16828, 9fa7df2936eb4fa89a61d202b1559f0a=136381, c275f4f7aa1c4ac3bbea51cd32f94812=15750, aa06e2e9ba234ebf9e8c3aec4b6f4095=16829, add8902209e34c42b5ae78a46a857784=162446, effa810d11e946a18a1fe36ea9e8a112=19013, 9342657d51b54ad2822644bd5429ccf3=17918, fbb387d87e224edd8e973ade9480d4c4=183053, 4970d8b545074bda863a1950b699aab2=12523, 45a248005b42499090b76cf009ea3f1c=19013, 69423c615fca4afc8ca7551fdcaf4d8d=22254] 2024-11-17T22:50:39,085 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/data/default/TestLogRolling-testLogRolling/2071f94e963317acec39cea472aff95f/recovered.edits/346.seqid, newMaxSeqId=346, maxSeqId=130 2024-11-17T22:50:39,086 INFO [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f. 2024-11-17T22:50:39,086 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 2071f94e963317acec39cea472aff95f: Waiting for close lock at 1731883839053Running coprocessor pre-close hooks at 1731883839053Disabling compacts and flushes for region at 1731883839053Disabling writes for close at 1731883839053Writing region close event to WAL at 1731883839082 (+29 ms)Running coprocessor post-close hooks at 1731883839086 (+4 ms)Closed at 1731883839086 2024-11-17T22:50:39,087 DEBUG [RS_CLOSE_REGION-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731883816520.2071f94e963317acec39cea472aff95f. 
2024-11-17T22:50:39,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:39,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:39,245 INFO [RS:0;1a6e40b21a48:35999 {}] regionserver.HRegionServer(976): stopping server 1a6e40b21a48,35999,1731883800706; all regions closed. 2024-11-17T22:50:39,246 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:39,246 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:39,246 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:39,246 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:39,246 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:39,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741834_1010 (size=8107) 2024-11-17T22:50:39,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741834_1010 (size=8107) 2024-11-17T22:50:39,252 DEBUG [RS:0;1a6e40b21a48:35999 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/oldWALs 2024-11-17T22:50:39,253 INFO [RS:0;1a6e40b21a48:35999 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1a6e40b21a48%2C35999%2C1731883800706.meta:.meta(num 1731883801997) 2024-11-17T22:50:39,253 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:39,253 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:39,253 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:39,254 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:39,254 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:39,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741882_1058 (size=780) 2024-11-17T22:50:39,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741882_1058 (size=780) 2024-11-17T22:50:39,260 DEBUG [RS:0;1a6e40b21a48:35999 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/oldWALs 2024-11-17T22:50:39,260 INFO [RS:0;1a6e40b21a48:35999 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1a6e40b21a48%2C35999%2C1731883800706:(num 1731883839032) 2024-11-17T22:50:39,260 DEBUG [RS:0;1a6e40b21a48:35999 {}] ipc.AbstractRpcClient(514): 
Stopping rpc client 2024-11-17T22:50:39,260 INFO [RS:0;1a6e40b21a48:35999 {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T22:50:39,260 INFO [RS:0;1a6e40b21a48:35999 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T22:50:39,261 INFO [RS:0;1a6e40b21a48:35999 {}] hbase.ChoreService(370): Chore service for: regionserver/1a6e40b21a48:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-17T22:50:39,261 INFO [RS:0;1a6e40b21a48:35999 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T22:50:39,261 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-17T22:50:39,261 INFO [RS:0;1a6e40b21a48:35999 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35999 2024-11-17T22:50:39,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35999-0x1004fe0f4a40001, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1a6e40b21a48,35999,1731883800706 2024-11-17T22:50:39,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46683-0x1004fe0f4a40000, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T22:50:39,263 INFO [RS:0;1a6e40b21a48:35999 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T22:50:39,264 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1a6e40b21a48,35999,1731883800706] 2024-11-17T22:50:39,264 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1a6e40b21a48,35999,1731883800706 already deleted, retry=false 2024-11-17T22:50:39,265 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1a6e40b21a48,35999,1731883800706 expired; onlineServers=0 2024-11-17T22:50:39,265 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '1a6e40b21a48,46683,1731883800648' ***** 2024-11-17T22:50:39,265 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-17T22:50:39,265 INFO [M:0;1a6e40b21a48:46683 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T22:50:39,265 INFO [M:0;1a6e40b21a48:46683 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T22:50:39,265 DEBUG [M:0;1a6e40b21a48:46683 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-17T22:50:39,265 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-17T22:50:39,265 DEBUG [M:0;1a6e40b21a48:46683 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-17T22:50:39,265 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.large.0-1731883801334 {}] cleaner.HFileCleaner(306): Exit Thread[master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.large.0-1731883801334,5,FailOnTimeoutGroup] 2024-11-17T22:50:39,265 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.small.0-1731883801337 {}] cleaner.HFileCleaner(306): Exit Thread[master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.small.0-1731883801337,5,FailOnTimeoutGroup] 2024-11-17T22:50:39,265 INFO [M:0;1a6e40b21a48:46683 {}] hbase.ChoreService(370): Chore service for: master/1a6e40b21a48:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-17T22:50:39,265 INFO [M:0;1a6e40b21a48:46683 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T22:50:39,265 DEBUG [M:0;1a6e40b21a48:46683 {}] master.HMaster(1795): Stopping service threads 2024-11-17T22:50:39,265 INFO [M:0;1a6e40b21a48:46683 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-17T22:50:39,266 INFO [M:0;1a6e40b21a48:46683 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T22:50:39,266 INFO [M:0;1a6e40b21a48:46683 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-17T22:50:39,266 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-17T22:50:39,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46683-0x1004fe0f4a40000, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-17T22:50:39,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46683-0x1004fe0f4a40000, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:50:39,266 DEBUG [M:0;1a6e40b21a48:46683 {}] zookeeper.ZKUtil(347): master:46683-0x1004fe0f4a40000, quorum=127.0.0.1:59694, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-17T22:50:39,266 WARN [M:0;1a6e40b21a48:46683 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-17T22:50:39,267 INFO [M:0;1a6e40b21a48:46683 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/.lastflushedseqids 2024-11-17T22:50:39,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741883_1059 (size=228) 2024-11-17T22:50:39,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741883_1059 (size=228) 2024-11-17T22:50:39,271 INFO [M:0;1a6e40b21a48:46683 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-17T22:50:39,271 INFO [M:0;1a6e40b21a48:46683 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-17T22:50:39,272 DEBUG [M:0;1a6e40b21a48:46683 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T22:50:39,272 INFO [M:0;1a6e40b21a48:46683 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:50:39,272 DEBUG [M:0;1a6e40b21a48:46683 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:50:39,272 DEBUG [M:0;1a6e40b21a48:46683 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T22:50:39,272 DEBUG [M:0;1a6e40b21a48:46683 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:50:39,272 INFO [M:0;1a6e40b21a48:46683 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.43 KB heapSize=63.38 KB 2024-11-17T22:50:39,287 DEBUG [M:0;1a6e40b21a48:46683 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7e426c4cb7bb493e8ea3c176806c15f3 is 82, key is hbase:meta,,1/info:regioninfo/1731883802068/Put/seqid=0 2024-11-17T22:50:39,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741884_1060 (size=5672) 2024-11-17T22:50:39,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741884_1060 (size=5672) 2024-11-17T22:50:39,292 INFO [M:0;1a6e40b21a48:46683 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7e426c4cb7bb493e8ea3c176806c15f3 2024-11-17T22:50:39,311 DEBUG [M:0;1a6e40b21a48:46683 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9a94892e0aee44e4b9845d9637a0ec6f is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731883802553/Put/seqid=0 2024-11-17T22:50:39,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741885_1061 (size=7090) 2024-11-17T22:50:39,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741885_1061 (size=7090) 2024-11-17T22:50:39,316 INFO [M:0;1a6e40b21a48:46683 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.83 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9a94892e0aee44e4b9845d9637a0ec6f 2024-11-17T22:50:39,320 INFO [M:0;1a6e40b21a48:46683 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9a94892e0aee44e4b9845d9637a0ec6f 2024-11-17T22:50:39,334 DEBUG [M:0;1a6e40b21a48:46683 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/aff3019e5da148c4b4f6e7fa0caf5270 is 69, key is 1a6e40b21a48,35999,1731883800706/rs:state/1731883801370/Put/seqid=0 2024-11-17T22:50:39,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741886_1062 (size=5156) 2024-11-17T22:50:39,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741886_1062 (size=5156) 2024-11-17T22:50:39,338 INFO [M:0;1a6e40b21a48:46683 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/aff3019e5da148c4b4f6e7fa0caf5270 2024-11-17T22:50:39,357 DEBUG [M:0;1a6e40b21a48:46683 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2b5bbd2371d544b3b78038f7f4865e96 is 52, key is load_balancer_on/state:d/1731883802166/Put/seqid=0 2024-11-17T22:50:39,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741887_1063 (size=5056) 2024-11-17T22:50:39,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741887_1063 (size=5056) 2024-11-17T22:50:39,362 INFO [M:0;1a6e40b21a48:46683 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2b5bbd2371d544b3b78038f7f4865e96 2024-11-17T22:50:39,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35999-0x1004fe0f4a40001, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T22:50:39,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35999-0x1004fe0f4a40001, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T22:50:39,364 INFO [RS:0;1a6e40b21a48:35999 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T22:50:39,364 INFO [RS:0;1a6e40b21a48:35999 {}] regionserver.HRegionServer(1031): Exiting; stopping=1a6e40b21a48,35999,1731883800706; zookeeper connection closed. 
2024-11-17T22:50:39,364 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@515a2b06 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@515a2b06 2024-11-17T22:50:39,364 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-17T22:50:39,367 DEBUG [M:0;1a6e40b21a48:46683 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7e426c4cb7bb493e8ea3c176806c15f3 as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7e426c4cb7bb493e8ea3c176806c15f3 2024-11-17T22:50:39,372 INFO [M:0;1a6e40b21a48:46683 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7e426c4cb7bb493e8ea3c176806c15f3, entries=8, sequenceid=125, filesize=5.5 K 2024-11-17T22:50:39,373 DEBUG [M:0;1a6e40b21a48:46683 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9a94892e0aee44e4b9845d9637a0ec6f as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9a94892e0aee44e4b9845d9637a0ec6f 2024-11-17T22:50:39,377 INFO [M:0;1a6e40b21a48:46683 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9a94892e0aee44e4b9845d9637a0ec6f 2024-11-17T22:50:39,377 INFO [M:0;1a6e40b21a48:46683 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9a94892e0aee44e4b9845d9637a0ec6f, entries=13, sequenceid=125, filesize=6.9 K 2024-11-17T22:50:39,378 DEBUG [M:0;1a6e40b21a48:46683 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/aff3019e5da148c4b4f6e7fa0caf5270 as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/aff3019e5da148c4b4f6e7fa0caf5270 2024-11-17T22:50:39,382 INFO [M:0;1a6e40b21a48:46683 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/aff3019e5da148c4b4f6e7fa0caf5270, entries=1, sequenceid=125, filesize=5.0 K 2024-11-17T22:50:39,383 DEBUG [M:0;1a6e40b21a48:46683 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2b5bbd2371d544b3b78038f7f4865e96 as hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2b5bbd2371d544b3b78038f7f4865e96 2024-11-17T22:50:39,387 INFO [M:0;1a6e40b21a48:46683 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/7c620681-45e8-d888-497b-19c0d5a2ec9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2b5bbd2371d544b3b78038f7f4865e96, entries=1, sequenceid=125, filesize=4.9 K 2024-11-17T22:50:39,388 INFO [M:0;1a6e40b21a48:46683 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.32 KB/64840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 116ms, sequenceid=125, compaction requested=false 2024-11-17T22:50:39,389 INFO [M:0;1a6e40b21a48:46683 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:50:39,389 DEBUG [M:0;1a6e40b21a48:46683 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731883839272Disabling compacts and flushes for region at 1731883839272Disabling writes for close at 1731883839272Obtaining lock to block concurrent updates at 1731883839272Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731883839272Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52663, getHeapSize=64840, getOffHeapSize=0, getCellsCount=148 at 1731883839272Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731883839273 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731883839273Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731883839287 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731883839287Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731883839296 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731883839310 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731883839310Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731883839320 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731883839333 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731883839333Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731883839343 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731883839357 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731883839357Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2dffd319: reopening flushed file at 1731883839366 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7b16535a: reopening flushed file at 1731883839372 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@56b23084: reopening flushed file at 1731883839377 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@49d7ae4f: reopening flushed file at 1731883839382 (+5 ms)Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.32 KB/64840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 116ms, sequenceid=125, compaction requested=false at 1731883839388 (+6 ms)Writing region close event to WAL at 1731883839389 (+1 ms)Closed at 1731883839389 2024-11-17T22:50:39,389 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:39,389 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:39,390 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:39,390 INFO [sync.3 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:39,390 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:39,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741830_1006 (size=61332) 2024-11-17T22:50:39,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34851 is added to blk_1073741830_1006 (size=61332) 2024-11-17T22:50:39,392 INFO [M:0;1a6e40b21a48:46683 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-17T22:50:39,392 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-17T22:50:39,392 INFO [M:0;1a6e40b21a48:46683 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46683 2024-11-17T22:50:39,392 INFO [M:0;1a6e40b21a48:46683 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T22:50:39,402 INFO [regionserver/1a6e40b21a48:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T22:50:39,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46683-0x1004fe0f4a40000, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T22:50:39,493 INFO [M:0;1a6e40b21a48:46683 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T22:50:39,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46683-0x1004fe0f4a40000, quorum=127.0.0.1:59694, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T22:50:39,496 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@9612b29{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:50:39,496 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@314e7370{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T22:50:39,496 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T22:50:39,497 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1dab95de{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T22:50:39,497 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7305dd28{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/hadoop.log.dir/,STOPPED} 2024-11-17T22:50:39,498 WARN [BP-1786302858-172.17.0.2-1731883800009 heartbeating to localhost/127.0.0.1:46027 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T22:50:39,498 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T22:50:39,498 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T22:50:39,498 WARN [BP-1786302858-172.17.0.2-1731883800009 heartbeating to localhost/127.0.0.1:46027 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1786302858-172.17.0.2-1731883800009 (Datanode Uuid c74d30f6-3bc6-4903-89ca-5f82b0017218) service to localhost/127.0.0.1:46027 2024-11-17T22:50:39,499 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/cluster_7e2e05da-ae2c-c550-d8c9-e44748f5b97d/data/data3/current/BP-1786302858-172.17.0.2-1731883800009 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:50:39,499 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/cluster_7e2e05da-ae2c-c550-d8c9-e44748f5b97d/data/data4/current/BP-1786302858-172.17.0.2-1731883800009 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:50:39,499 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T22:50:39,501 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@45890504{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:50:39,501 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3d639fc0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T22:50:39,501 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T22:50:39,501 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@53cff5cb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T22:50:39,502 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a48d3d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/hadoop.log.dir/,STOPPED} 2024-11-17T22:50:39,503 WARN [BP-1786302858-172.17.0.2-1731883800009 heartbeating to localhost/127.0.0.1:46027 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T22:50:39,503 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T22:50:39,503 WARN [BP-1786302858-172.17.0.2-1731883800009 heartbeating to localhost/127.0.0.1:46027 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1786302858-172.17.0.2-1731883800009 (Datanode Uuid 4d2c2aa8-b30f-44ed-9614-ff46a008aa86) service to localhost/127.0.0.1:46027 2024-11-17T22:50:39,503 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T22:50:39,504 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/cluster_7e2e05da-ae2c-c550-d8c9-e44748f5b97d/data/data1/current/BP-1786302858-172.17.0.2-1731883800009 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:50:39,504 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/cluster_7e2e05da-ae2c-c550-d8c9-e44748f5b97d/data/data2/current/BP-1786302858-172.17.0.2-1731883800009 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T22:50:39,504 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T22:50:39,511 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@52d230c9{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T22:50:39,511 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@240fc28c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T22:50:39,511 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T22:50:39,511 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@56433553{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T22:50:39,511 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10ce7a76{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/hadoop.log.dir/,STOPPED} 2024-11-17T22:50:39,517 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-17T22:50:39,546 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-17T22:50:39,557 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=230 (was 206) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:46027 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:46027 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially 
hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46027 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46027 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:46027 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:46027 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46027 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46027 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=518 (was 485) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=99 (was 60) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=3968 (was 5128) 2024-11-17T22:50:39,566 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=230, OpenFileDescriptor=518, MaxFileDescriptor=1048576, SystemLoadAverage=99, ProcessCount=11, AvailableMemoryMB=3967 2024-11-17T22:50:39,566 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-17T22:50:39,566 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/hadoop.log.dir so I do NOT create it in target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e 2024-11-17T22:50:39,566 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a2d04408-ee1e-8938-d2e4-461407f78f86/hadoop.tmp.dir so I do NOT create it in target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e 2024-11-17T22:50:39,566 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/cluster_ff8c3934-7edd-8bdd-a48a-6ec929cde6c4, deleteOnExit=true 2024-11-17T22:50:39,566 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-17T22:50:39,567 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/test.cache.data in system properties and HBase conf 2024-11-17T22:50:39,567 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/hadoop.tmp.dir in system properties and HBase conf 2024-11-17T22:50:39,567 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/hadoop.log.dir in system properties and HBase conf 2024-11-17T22:50:39,567 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-17T22:50:39,567 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-17T22:50:39,567 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-17T22:50:39,567 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-17T22:50:39,567 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-17T22:50:39,568 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-17T22:50:39,568 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-17T22:50:39,568 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T22:50:39,568 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-17T22:50:39,568 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-17T22:50:39,568 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T22:50:39,568 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T22:50:39,568 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-17T22:50:39,568 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/nfs.dump.dir in system properties and HBase conf 2024-11-17T22:50:39,568 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/java.io.tmpdir in system properties and HBase conf 2024-11-17T22:50:39,568 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T22:50:39,569 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-17T22:50:39,569 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-17T22:50:39,576 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:50:39,586 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T22:50:39,633 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:50:39,638 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T22:50:39,641 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T22:50:39,641 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T22:50:39,641 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T22:50:39,643 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:50:39,643 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@251f308d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/hadoop.log.dir/,AVAILABLE} 2024-11-17T22:50:39,644 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@31a449e1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T22:50:39,734 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2394ff19{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/java.io.tmpdir/jetty-localhost-44139-hadoop-hdfs-3_4_1-tests_jar-_-any-13350278573352308771/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T22:50:39,735 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@34b79769{HTTP/1.1, (http/1.1)}{localhost:44139} 2024-11-17T22:50:39,735 INFO [Time-limited test {}] server.Server(415): Started @280759ms 2024-11-17T22:50:39,746 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T22:50:39,786 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:50:39,789 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T22:50:39,790 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T22:50:39,790 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T22:50:39,790 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T22:50:39,791 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c3c893{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/hadoop.log.dir/,AVAILABLE} 2024-11-17T22:50:39,791 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d0f6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T22:50:39,885 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5bfccd5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/java.io.tmpdir/jetty-localhost-39963-hadoop-hdfs-3_4_1-tests_jar-_-any-15804687500899878927/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:50:39,885 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@444363ed{HTTP/1.1, (http/1.1)}{localhost:39963} 2024-11-17T22:50:39,885 INFO [Time-limited test {}] server.Server(415): Started @280909ms 2024-11-17T22:50:39,886 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T22:50:39,910 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T22:50:39,912 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T22:50:39,913 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T22:50:39,913 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T22:50:39,913 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T22:50:39,913 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7e09407e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/hadoop.log.dir/,AVAILABLE} 2024-11-17T22:50:39,914 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@377e4a58{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T22:50:39,941 WARN [Thread-2494 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/cluster_ff8c3934-7edd-8bdd-a48a-6ec929cde6c4/data/data1/current/BP-930226705-172.17.0.2-1731883839589/current, will proceed with Du for space computation calculation, 2024-11-17T22:50:39,941 WARN [Thread-2495 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/cluster_ff8c3934-7edd-8bdd-a48a-6ec929cde6c4/data/data2/current/BP-930226705-172.17.0.2-1731883839589/current, will proceed with Du for space computation calculation, 2024-11-17T22:50:39,977 WARN [Thread-2473 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T22:50:39,980 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd81a165e914cf659 with lease ID 0xb901c875d8b2d057: Processing first storage report for DS-7acc6389-3f85-407d-af66-e64d827eda7b from datanode DatanodeRegistration(127.0.0.1:40435, datanodeUuid=26aa8420-78a0-4cd1-a3f4-0d30864b4921, infoPort=43459, infoSecurePort=0, ipcPort=44341, storageInfo=lv=-57;cid=testClusterID;nsid=485874732;c=1731883839589) 2024-11-17T22:50:39,980 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd81a165e914cf659 with lease ID 0xb901c875d8b2d057: from storage DS-7acc6389-3f85-407d-af66-e64d827eda7b node DatanodeRegistration(127.0.0.1:40435, datanodeUuid=26aa8420-78a0-4cd1-a3f4-0d30864b4921, infoPort=43459, infoSecurePort=0, ipcPort=44341, storageInfo=lv=-57;cid=testClusterID;nsid=485874732;c=1731883839589), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:50:39,980 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd81a165e914cf659 with lease ID 0xb901c875d8b2d057: Processing first storage report for DS-a2768e1e-31a4-4cae-81ca-ed59bd668514 from datanode DatanodeRegistration(127.0.0.1:40435, datanodeUuid=26aa8420-78a0-4cd1-a3f4-0d30864b4921, infoPort=43459, infoSecurePort=0, ipcPort=44341, storageInfo=lv=-57;cid=testClusterID;nsid=485874732;c=1731883839589) 2024-11-17T22:50:39,980 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd81a165e914cf659 with lease ID 0xb901c875d8b2d057: from storage DS-a2768e1e-31a4-4cae-81ca-ed59bd668514 node DatanodeRegistration(127.0.0.1:40435, datanodeUuid=26aa8420-78a0-4cd1-a3f4-0d30864b4921, infoPort=43459, infoSecurePort=0, ipcPort=44341, storageInfo=lv=-57;cid=testClusterID;nsid=485874732;c=1731883839589), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-17T22:50:40,028 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@352a3917{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/java.io.tmpdir/jetty-localhost-41997-hadoop-hdfs-3_4_1-tests_jar-_-any-1179401750537115926/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T22:50:40,028 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@349fe29e{HTTP/1.1, (http/1.1)}{localhost:41997} 2024-11-17T22:50:40,028 INFO [Time-limited test {}] server.Server(415): Started @281052ms 2024-11-17T22:50:40,029 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
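The Jetty contexts, BlockPoolSlice warnings and first storage reports above are what a fresh MiniDFSCluster with two DataNodes prints as it comes up. As a rough, self-contained sketch of starting a comparable mini cluster directly with Hadoop's test API (HBaseTestingUtil normally does this internally; the base directory and probe file below are hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsClusterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical scratch directory; the harness derives its own under target/test-data/...
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "/tmp/minidfs-sketch");

    // Two DataNodes, matching the two DatanodeRegistrations reporting storage above.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitClusterUp();

    FileSystem fs = cluster.getFileSystem();
    try (FSDataOutputStream out = fs.create(new Path("/probe"))) { // hypothetical file
      out.writeUTF("hello"); // a small write so a block is allocated and reported
    }
    System.out.println("NameNode at " + fs.getUri());

    cluster.shutdown();
  }
}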
2024-11-17T22:50:40,086 WARN [Thread-2520 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/cluster_ff8c3934-7edd-8bdd-a48a-6ec929cde6c4/data/data3/current/BP-930226705-172.17.0.2-1731883839589/current, will proceed with Du for space computation calculation, 2024-11-17T22:50:40,086 WARN [Thread-2521 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/cluster_ff8c3934-7edd-8bdd-a48a-6ec929cde6c4/data/data4/current/BP-930226705-172.17.0.2-1731883839589/current, will proceed with Du for space computation calculation, 2024-11-17T22:50:40,104 WARN [Thread-2509 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-17T22:50:40,106 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbdc8762d3509bb0d with lease ID 0xb901c875d8b2d058: Processing first storage report for DS-195b116c-0bad-4141-994e-f68139a865ad from datanode DatanodeRegistration(127.0.0.1:34081, datanodeUuid=4d936ee7-7c32-430e-adb6-87b9b20917aa, infoPort=33051, infoSecurePort=0, ipcPort=44661, storageInfo=lv=-57;cid=testClusterID;nsid=485874732;c=1731883839589) 2024-11-17T22:50:40,106 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbdc8762d3509bb0d with lease ID 0xb901c875d8b2d058: from storage DS-195b116c-0bad-4141-994e-f68139a865ad node DatanodeRegistration(127.0.0.1:34081, datanodeUuid=4d936ee7-7c32-430e-adb6-87b9b20917aa, infoPort=33051, infoSecurePort=0, ipcPort=44661, storageInfo=lv=-57;cid=testClusterID;nsid=485874732;c=1731883839589), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:50:40,106 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbdc8762d3509bb0d with lease ID 0xb901c875d8b2d058: Processing first storage report for DS-089ce1f5-0abc-4c7d-9894-4ea7c6405c6e from datanode DatanodeRegistration(127.0.0.1:34081, datanodeUuid=4d936ee7-7c32-430e-adb6-87b9b20917aa, infoPort=33051, infoSecurePort=0, ipcPort=44661, storageInfo=lv=-57;cid=testClusterID;nsid=485874732;c=1731883839589) 2024-11-17T22:50:40,106 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbdc8762d3509bb0d with lease ID 0xb901c875d8b2d058: from storage DS-089ce1f5-0abc-4c7d-9894-4ea7c6405c6e node DatanodeRegistration(127.0.0.1:34081, datanodeUuid=4d936ee7-7c32-430e-adb6-87b9b20917aa, infoPort=33051, infoSecurePort=0, ipcPort=44661, storageInfo=lv=-57;cid=testClusterID;nsid=485874732;c=1731883839589), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T22:50:40,151 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e 2024-11-17T22:50:40,156 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/cluster_ff8c3934-7edd-8bdd-a48a-6ec929cde6c4/zookeeper_0, clientPort=63796, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/cluster_ff8c3934-7edd-8bdd-a48a-6ec929cde6c4/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/cluster_ff8c3934-7edd-8bdd-a48a-6ec929cde6c4/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-17T22:50:40,157 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63796 2024-11-17T22:50:40,157 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:50:40,159 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:50:40,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:50:40,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T22:50:40,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34081 is added to blk_1073741825_1001 (size=7) 2024-11-17T22:50:40,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40435 is added to blk_1073741825_1001 (size=7) 2024-11-17T22:50:40,174 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7 with version=8 2024-11-17T22:50:40,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40071/user/jenkins/test-data/88bba42c-4354-4273-220a-eda53115579c/hbase-staging 2024-11-17T22:50:40,176 INFO [Time-limited test {}] client.ConnectionUtils(128): master/1a6e40b21a48:0 server-side Connection retries=45 2024-11-17T22:50:40,177 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T22:50:40,177 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T22:50:40,177 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T22:50:40,177 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T22:50:40,177 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T22:50:40,177 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-17T22:50:40,177 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T22:50:40,178 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44073 2024-11-17T22:50:40,179 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44073 connecting to ZooKeeper ensemble=127.0.0.1:63796 2024-11-17T22:50:40,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:440730x0, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T22:50:40,183 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44073-0x1004fe18f090000 connected 2024-11-17T22:50:40,193 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:50:40,194 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:50:40,196 DEBUG [Time-limited test {}] 
zookeeper.ZKUtil(113): master:44073-0x1004fe18f090000, quorum=127.0.0.1:63796, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T22:50:40,196 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7, hbase.cluster.distributed=false 2024-11-17T22:50:40,198 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44073-0x1004fe18f090000, quorum=127.0.0.1:63796, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T22:50:40,200 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44073 2024-11-17T22:50:40,200 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44073 2024-11-17T22:50:40,200 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44073 2024-11-17T22:50:40,200 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44073 2024-11-17T22:50:40,201 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44073 2024-11-17T22:50:40,216 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1a6e40b21a48:0 server-side Connection retries=45 2024-11-17T22:50:40,217 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T22:50:40,217 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T22:50:40,217 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T22:50:40,217 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T22:50:40,217 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T22:50:40,217 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-17T22:50:40,217 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T22:50:40,217 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38273 2024-11-17T22:50:40,218 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38273 connecting to ZooKeeper ensemble=127.0.0.1:63796 2024-11-17T22:50:40,219 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
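The recurring "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" warnings above come from RecoverLeaseFSUtils probing DistributedFileSystem.isFileClosed() by reflection while the Close-WAL-Writer thread is closing a WAL whose DFSClient has already been shut down, which is why the real cause surfaces wrapped in an InvocationTargetException. A simplified illustrative sketch of such a reflective probe (not HBase's actual implementation) looks like this:

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class IsFileClosedProbe {
  /**
   * Best-effort check whether HDFS considers a file closed. Reflection is used so the
   * code also links against FileSystem implementations that lack isFileClosed().
   */
  static boolean isFileClosed(FileSystem fs, Path p) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, p);
    } catch (NoSuchMethodException e) {
      return false; // this FileSystem does not expose isFileClosed()
    } catch (IllegalAccessException | InvocationTargetException e) {
      // InvocationTargetException wraps the real cause, e.g. IOException: Filesystem closed
      return false;
    }
  }
}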
2024-11-17T22:50:40,220 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:50:40,223 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:382730x0, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T22:50:40,223 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:382730x0, quorum=127.0.0.1:63796, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T22:50:40,223 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38273-0x1004fe18f090001 connected 2024-11-17T22:50:40,223 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-17T22:50:40,224 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-17T22:50:40,224 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38273-0x1004fe18f090001, quorum=127.0.0.1:63796, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-17T22:50:40,225 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38273-0x1004fe18f090001, quorum=127.0.0.1:63796, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T22:50:40,229 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38273 2024-11-17T22:50:40,229 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38273 2024-11-17T22:50:40,229 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38273 2024-11-17T22:50:40,230 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38273 2024-11-17T22:50:40,231 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38273 2024-11-17T22:50:40,243 DEBUG [M:0;1a6e40b21a48:44073 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;1a6e40b21a48:44073 2024-11-17T22:50:40,243 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/1a6e40b21a48,44073,1731883840176 2024-11-17T22:50:40,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44073-0x1004fe18f090000, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T22:50:40,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38273-0x1004fe18f090001, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T22:50:40,245 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44073-0x1004fe18f090000, quorum=127.0.0.1:63796, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/1a6e40b21a48,44073,1731883840176 2024-11-17T22:50:40,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:38273-0x1004fe18f090001, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-17T22:50:40,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44073-0x1004fe18f090000, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:50:40,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38273-0x1004fe18f090001, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:50:40,246 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44073-0x1004fe18f090000, quorum=127.0.0.1:63796, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-17T22:50:40,246 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/1a6e40b21a48,44073,1731883840176 from backup master directory 2024-11-17T22:50:40,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44073-0x1004fe18f090000, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/1a6e40b21a48,44073,1731883840176 2024-11-17T22:50:40,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38273-0x1004fe18f090001, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T22:50:40,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44073-0x1004fe18f090000, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T22:50:40,247 WARN [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
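The znode traffic above (register under /hbase/backup-masters, grab /hbase/master, then delete the backup entry) is ordinary ZooKeeper leader election. A stripped-down sketch using the stock ZooKeeper client rather than HBase's ActiveMasterManager/ZKUtil is shown below; it reuses the ensemble address, server name and paths from the log, assumes the parent znodes already exist (as they do in this cluster), and the surrounding class is hypothetical.

import java.nio.charset.StandardCharsets;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public final class MasterElectionSketch {
  public static void main(String[] args) throws Exception {
    // Ensemble address and server name taken from the log above.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:63796", 30_000, event -> { });
    String server = "1a6e40b21a48,44073,1731883840176";
    byte[] data = server.getBytes(StandardCharsets.UTF_8);

    // Register as a backup master first, as HMaster(2510) does.
    zk.create("/hbase/backup-masters/" + server, data,
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    try {
      // Whoever creates the ephemeral /hbase/master znode is the active master.
      zk.create("/hbase/master", data, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
      // Won the election: remove our backup-masters entry, mirroring ActiveMasterManager(245).
      zk.delete("/hbase/backup-masters/" + server, -1);
      System.out.println("Registered as active master " + server);
    } catch (KeeperException.NodeExistsException e) {
      // Someone else is active: stay a backup and watch /hbase/master for deletion.
      zk.exists("/hbase/master", true);
    }
  }
}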
2024-11-17T22:50:40,247 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=1a6e40b21a48,44073,1731883840176 2024-11-17T22:50:40,250 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/hbase.id] with ID: 246b3557-ad9c-4c2b-878b-a56139c800f6 2024-11-17T22:50:40,250 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/.tmp/hbase.id 2024-11-17T22:50:40,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40435 is added to blk_1073741826_1002 (size=42) 2024-11-17T22:50:40,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34081 is added to blk_1073741826_1002 (size=42) 2024-11-17T22:50:40,258 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/.tmp/hbase.id]:[hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/hbase.id] 2024-11-17T22:50:40,269 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:50:40,269 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-17T22:50:40,270 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
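Writing the cluster ID to .tmp/hbase.id first and then moving it to hbase.id, as FSUtils does above, is the usual write-then-rename trick for publishing a file atomically on HDFS: rename is atomic, a half-written file is not. A minimal sketch with the plain FileSystem API (hypothetical paths; not HBase's FSUtils itself):

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class ClusterIdFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();   // fs.defaultFS would point at the mini cluster
    FileSystem fs = FileSystem.get(conf);

    Path rootDir = new Path("/hbase-root");     // hypothetical root; the log uses .../e028f2de-.../
    Path tmp = new Path(rootDir, ".tmp/hbase.id");
    Path target = new Path(rootDir, "hbase.id");

    // 1. Write the ID somewhere readers never look.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
    }
    // 2. Atomically move it into place; readers see either no file or a complete one.
    if (!fs.rename(tmp, target)) {
      throw new IOException("rename " + tmp + " -> " + target + " failed");
    }
  }
}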
2024-11-17T22:50:40,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44073-0x1004fe18f090000, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:50:40,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38273-0x1004fe18f090001, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:50:40,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40435 is added to blk_1073741827_1003 (size=196) 2024-11-17T22:50:40,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34081 is added to blk_1073741827_1003 (size=196) 2024-11-17T22:50:40,277 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T22:50:40,277 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-17T22:50:40,278 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T22:50:40,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40435 is added to blk_1073741828_1004 (size=1189) 2024-11-17T22:50:40,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34081 is added to blk_1073741828_1004 (size=1189) 2024-11-17T22:50:40,287 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/MasterData/data/master/store 2024-11-17T22:50:40,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40435 is added to blk_1073741829_1005 (size=34) 2024-11-17T22:50:40,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34081 is added to blk_1073741829_1005 (size=34) 2024-11-17T22:50:40,294 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:50:40,294 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T22:50:40,294 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:50:40,294 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:50:40,294 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T22:50:40,294 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:50:40,294 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
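The master:store descriptor dumped above (families info, proc, rs and state with their VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING and BLOCKSIZE settings) is built like any other table descriptor. As a rough translation of just the 'info' and 'proc' families into the public client API, with a hypothetical table name and without internal attributes such as hbase.store.file-tracker.impl:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class StoreDescriptorSketch {
  public static TableDescriptor build() {
    // Hypothetical table name standing in for the internal master:store region.
    TableName name = TableName.valueOf("demo", "store");

    return TableDescriptorBuilder.newBuilder(name)
        // 'info': 3 versions, ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build())
        // 'proc': single version, ROW bloom, 64 KB blocks, no encoding
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .setBlocksize(64 * 1024)
            .build())
        .build();
  }
}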
2024-11-17T22:50:40,294 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731883840294Disabling compacts and flushes for region at 1731883840294Disabling writes for close at 1731883840294Writing region close event to WAL at 1731883840294Closed at 1731883840294 2024-11-17T22:50:40,295 WARN [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/MasterData/data/master/store/.initializing 2024-11-17T22:50:40,295 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/MasterData/WALs/1a6e40b21a48,44073,1731883840176 2024-11-17T22:50:40,297 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a6e40b21a48%2C44073%2C1731883840176, suffix=, logDir=hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/MasterData/WALs/1a6e40b21a48,44073,1731883840176, archiveDir=hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/MasterData/oldWALs, maxLogs=10 2024-11-17T22:50:40,297 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C44073%2C1731883840176.1731883840297 2024-11-17T22:50:40,301 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/MasterData/WALs/1a6e40b21a48,44073,1731883840176/1a6e40b21a48%2C44073%2C1731883840176.1731883840297 2024-11-17T22:50:40,302 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43459:43459),(127.0.0.1/127.0.0.1:33051:33051)] 2024-11-17T22:50:40,303 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-17T22:50:40,303 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:50:40,303 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:50:40,303 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:50:40,304 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:50:40,306 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-17T22:50:40,306 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:50:40,306 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:50:40,306 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:50:40,307 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-17T22:50:40,307 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:50:40,308 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T22:50:40,308 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:50:40,309 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-17T22:50:40,309 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:50:40,309 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T22:50:40,309 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:50:40,310 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-17T22:50:40,310 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:50:40,311 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T22:50:40,311 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:50:40,312 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:50:40,312 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:50:40,314 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:50:40,314 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:50:40,315 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-17T22:50:40,316 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T22:50:40,318 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T22:50:40,319 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=821083, jitterRate=0.04406137764453888}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-17T22:50:40,320 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731883840303Initializing all the Stores at 1731883840304 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883840304Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883840304Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883840304Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883840304Cleaning up temporary data from old regions at 1731883840314 (+10 ms)Region opened successfully at 1731883840319 (+5 ms) 2024-11-17T22:50:40,320 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-17T22:50:40,323 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5aece446, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a6e40b21a48/172.17.0.2:0 2024-11-17T22:50:40,324 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-17T22:50:40,324 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-17T22:50:40,324 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-17T22:50:40,324 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-17T22:50:40,325 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-17T22:50:40,325 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-17T22:50:40,325 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-17T22:50:40,328 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-17T22:50:40,329 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44073-0x1004fe18f090000, quorum=127.0.0.1:63796, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-17T22:50:40,330 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-17T22:50:40,330 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-17T22:50:40,331 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44073-0x1004fe18f090000, quorum=127.0.0.1:63796, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-17T22:50:40,331 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-17T22:50:40,332 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-17T22:50:40,332 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44073-0x1004fe18f090000, quorum=127.0.0.1:63796, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-17T22:50:40,333 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-17T22:50:40,334 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44073-0x1004fe18f090000, quorum=127.0.0.1:63796, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-17T22:50:40,335 DEBUG 
[master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-17T22:50:40,336 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44073-0x1004fe18f090000, quorum=127.0.0.1:63796, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-17T22:50:40,337 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-17T22:50:40,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44073-0x1004fe18f090000, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T22:50:40,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38273-0x1004fe18f090001, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T22:50:40,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38273-0x1004fe18f090001, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:50:40,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44073-0x1004fe18f090000, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:50:40,338 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=1a6e40b21a48,44073,1731883840176, sessionid=0x1004fe18f090000, setting cluster-up flag (Was=false) 2024-11-17T22:50:40,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44073-0x1004fe18f090000, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:50:40,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38273-0x1004fe18f090001, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:50:40,343 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-17T22:50:40,343 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1a6e40b21a48,44073,1731883840176 2024-11-17T22:50:40,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44073-0x1004fe18f090000, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:50:40,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38273-0x1004fe18f090001, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:50:40,348 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-17T22:50:40,348 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1a6e40b21a48,44073,1731883840176 2024-11-17T22:50:40,349 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-17T22:50:40,351 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-17T22:50:40,351 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-17T22:50:40,351 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-17T22:50:40,351 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 1a6e40b21a48,44073,1731883840176 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-17T22:50:40,352 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/1a6e40b21a48:0, corePoolSize=5, maxPoolSize=5 2024-11-17T22:50:40,352 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/1a6e40b21a48:0, corePoolSize=5, maxPoolSize=5 2024-11-17T22:50:40,352 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=5, maxPoolSize=5 2024-11-17T22:50:40,352 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=5, maxPoolSize=5 2024-11-17T22:50:40,352 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/1a6e40b21a48:0, corePoolSize=10, maxPoolSize=10 2024-11-17T22:50:40,352 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:50:40,352 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=2, maxPoolSize=2 2024-11-17T22:50:40,352 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/1a6e40b21a48:0, corePoolSize=1, 
maxPoolSize=1 2024-11-17T22:50:40,354 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T22:50:40,354 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731883870354 2024-11-17T22:50:40,354 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-17T22:50:40,354 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-17T22:50:40,354 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-17T22:50:40,354 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-17T22:50:40,354 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-17T22:50:40,354 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-17T22:50:40,354 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-17T22:50:40,354 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-17T22:50:40,355 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-17T22:50:40,355 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-17T22:50:40,355 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-17T22:50:40,355 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-17T22:50:40,355 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:50:40,355 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-17T22:50:40,355 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.large.0-1731883840355,5,FailOnTimeoutGroup] 2024-11-17T22:50:40,355 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-17T22:50:40,356 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.small.0-1731883840355,5,FailOnTimeoutGroup] 2024-11-17T22:50:40,356 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-17T22:50:40,356 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-17T22:50:40,356 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-17T22:50:40,356 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-17T22:50:40,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40435 is added to blk_1073741831_1007 (size=1321) 2024-11-17T22:50:40,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34081 is added to blk_1073741831_1007 (size=1321) 2024-11-17T22:50:40,361 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-17T22:50:40,361 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7 2024-11-17T22:50:40,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40435 is added to blk_1073741832_1008 (size=32) 2024-11-17T22:50:40,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34081 is added to blk_1073741832_1008 (size=32) 2024-11-17T22:50:40,370 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:50:40,371 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T22:50:40,372 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T22:50:40,372 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:50:40,373 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:50:40,373 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T22:50:40,374 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T22:50:40,374 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:50:40,374 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:50:40,374 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T22:50:40,375 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T22:50:40,375 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:50:40,375 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:50:40,375 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T22:50:40,376 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T22:50:40,376 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:50:40,376 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:50:40,376 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T22:50:40,377 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/data/hbase/meta/1588230740 2024-11-17T22:50:40,377 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/data/hbase/meta/1588230740 2024-11-17T22:50:40,378 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T22:50:40,378 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T22:50:40,378 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-17T22:50:40,379 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T22:50:40,380 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T22:50:40,381 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=871721, jitterRate=0.10845085978507996}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T22:50:40,381 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731883840370Initializing all the Stores at 1731883840371 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883840371Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883840371Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883840371Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883840371Cleaning up temporary data from old regions at 1731883840378 (+7 ms)Region opened successfully at 1731883840381 (+3 ms) 2024-11-17T22:50:40,381 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T22:50:40,381 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T22:50:40,381 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T22:50:40,381 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T22:50:40,381 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T22:50:40,382 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T22:50:40,382 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731883840381Disabling compacts and flushes for region at 
1731883840381Disabling writes for close at 1731883840381Writing region close event to WAL at 1731883840382 (+1 ms)Closed at 1731883840382 2024-11-17T22:50:40,382 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T22:50:40,382 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-17T22:50:40,383 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-17T22:50:40,383 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T22:50:40,384 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-17T22:50:40,435 INFO [RS:0;1a6e40b21a48:38273 {}] regionserver.HRegionServer(746): ClusterId : 246b3557-ad9c-4c2b-878b-a56139c800f6 2024-11-17T22:50:40,435 DEBUG [RS:0;1a6e40b21a48:38273 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-17T22:50:40,437 DEBUG [RS:0;1a6e40b21a48:38273 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-17T22:50:40,437 DEBUG [RS:0;1a6e40b21a48:38273 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-17T22:50:40,440 DEBUG [RS:0;1a6e40b21a48:38273 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-17T22:50:40,440 DEBUG [RS:0;1a6e40b21a48:38273 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@505b45df, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a6e40b21a48/172.17.0.2:0 2024-11-17T22:50:40,456 DEBUG [RS:0;1a6e40b21a48:38273 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;1a6e40b21a48:38273 2024-11-17T22:50:40,456 INFO [RS:0;1a6e40b21a48:38273 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-17T22:50:40,456 INFO [RS:0;1a6e40b21a48:38273 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-17T22:50:40,456 DEBUG [RS:0;1a6e40b21a48:38273 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-17T22:50:40,457 INFO [RS:0;1a6e40b21a48:38273 {}] regionserver.HRegionServer(2659): reportForDuty to master=1a6e40b21a48,44073,1731883840176 with port=38273, startcode=1731883840216 2024-11-17T22:50:40,457 DEBUG [RS:0;1a6e40b21a48:38273 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-17T22:50:40,459 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54969, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-17T22:50:40,459 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44073 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1a6e40b21a48,38273,1731883840216 2024-11-17T22:50:40,459 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44073 {}] master.ServerManager(517): Registering regionserver=1a6e40b21a48,38273,1731883840216 2024-11-17T22:50:40,460 DEBUG [RS:0;1a6e40b21a48:38273 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7 2024-11-17T22:50:40,460 DEBUG [RS:0;1a6e40b21a48:38273 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46873 2024-11-17T22:50:40,460 DEBUG [RS:0;1a6e40b21a48:38273 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-17T22:50:40,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44073-0x1004fe18f090000, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T22:50:40,462 DEBUG [RS:0;1a6e40b21a48:38273 {}] zookeeper.ZKUtil(111): regionserver:38273-0x1004fe18f090001, quorum=127.0.0.1:63796, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1a6e40b21a48,38273,1731883840216 2024-11-17T22:50:40,462 WARN [RS:0;1a6e40b21a48:38273 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-17T22:50:40,462 INFO [RS:0;1a6e40b21a48:38273 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T22:50:40,462 DEBUG [RS:0;1a6e40b21a48:38273 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/WALs/1a6e40b21a48,38273,1731883840216 2024-11-17T22:50:40,462 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1a6e40b21a48,38273,1731883840216] 2024-11-17T22:50:40,465 INFO [RS:0;1a6e40b21a48:38273 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-17T22:50:40,467 INFO [RS:0;1a6e40b21a48:38273 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-17T22:50:40,467 INFO [RS:0;1a6e40b21a48:38273 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-17T22:50:40,467 INFO [RS:0;1a6e40b21a48:38273 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-17T22:50:40,467 INFO [RS:0;1a6e40b21a48:38273 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-17T22:50:40,468 INFO [RS:0;1a6e40b21a48:38273 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-17T22:50:40,468 INFO [RS:0;1a6e40b21a48:38273 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-17T22:50:40,468 DEBUG [RS:0;1a6e40b21a48:38273 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:50:40,468 DEBUG [RS:0;1a6e40b21a48:38273 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:50:40,468 DEBUG [RS:0;1a6e40b21a48:38273 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:50:40,468 DEBUG [RS:0;1a6e40b21a48:38273 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:50:40,468 DEBUG [RS:0;1a6e40b21a48:38273 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:50:40,468 DEBUG [RS:0;1a6e40b21a48:38273 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1a6e40b21a48:0, corePoolSize=2, maxPoolSize=2 2024-11-17T22:50:40,468 DEBUG [RS:0;1a6e40b21a48:38273 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:50:40,468 DEBUG [RS:0;1a6e40b21a48:38273 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:50:40,468 DEBUG [RS:0;1a6e40b21a48:38273 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:50:40,468 DEBUG [RS:0;1a6e40b21a48:38273 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:50:40,468 DEBUG [RS:0;1a6e40b21a48:38273 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:50:40,468 DEBUG [RS:0;1a6e40b21a48:38273 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1a6e40b21a48:0, corePoolSize=1, maxPoolSize=1 2024-11-17T22:50:40,468 DEBUG [RS:0;1a6e40b21a48:38273 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1a6e40b21a48:0, corePoolSize=3, maxPoolSize=3 2024-11-17T22:50:40,468 DEBUG [RS:0;1a6e40b21a48:38273 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1a6e40b21a48:0, corePoolSize=3, maxPoolSize=3 2024-11-17T22:50:40,469 INFO [RS:0;1a6e40b21a48:38273 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-17T22:50:40,469 INFO [RS:0;1a6e40b21a48:38273 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-17T22:50:40,469 INFO [RS:0;1a6e40b21a48:38273 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T22:50:40,469 INFO [RS:0;1a6e40b21a48:38273 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-17T22:50:40,469 INFO [RS:0;1a6e40b21a48:38273 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-17T22:50:40,470 INFO [RS:0;1a6e40b21a48:38273 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,38273,1731883840216-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T22:50:40,484 INFO [RS:0;1a6e40b21a48:38273 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-17T22:50:40,484 INFO [RS:0;1a6e40b21a48:38273 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,38273,1731883840216-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T22:50:40,484 INFO [RS:0;1a6e40b21a48:38273 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:50:40,484 INFO [RS:0;1a6e40b21a48:38273 {}] regionserver.Replication(171): 1a6e40b21a48,38273,1731883840216 started 2024-11-17T22:50:40,496 INFO [RS:0;1a6e40b21a48:38273 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:50:40,496 INFO [RS:0;1a6e40b21a48:38273 {}] regionserver.HRegionServer(1482): Serving as 1a6e40b21a48,38273,1731883840216, RpcServer on 1a6e40b21a48/172.17.0.2:38273, sessionid=0x1004fe18f090001 2024-11-17T22:50:40,496 DEBUG [RS:0;1a6e40b21a48:38273 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-17T22:50:40,496 DEBUG [RS:0;1a6e40b21a48:38273 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1a6e40b21a48,38273,1731883840216 2024-11-17T22:50:40,496 DEBUG [RS:0;1a6e40b21a48:38273 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a6e40b21a48,38273,1731883840216' 2024-11-17T22:50:40,496 DEBUG [RS:0;1a6e40b21a48:38273 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-17T22:50:40,497 DEBUG [RS:0;1a6e40b21a48:38273 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-17T22:50:40,497 DEBUG [RS:0;1a6e40b21a48:38273 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-17T22:50:40,497 DEBUG [RS:0;1a6e40b21a48:38273 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-17T22:50:40,497 DEBUG [RS:0;1a6e40b21a48:38273 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1a6e40b21a48,38273,1731883840216 2024-11-17T22:50:40,497 DEBUG [RS:0;1a6e40b21a48:38273 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a6e40b21a48,38273,1731883840216' 2024-11-17T22:50:40,497 DEBUG [RS:0;1a6e40b21a48:38273 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-17T22:50:40,498 DEBUG 
[RS:0;1a6e40b21a48:38273 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-17T22:50:40,498 DEBUG [RS:0;1a6e40b21a48:38273 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-17T22:50:40,498 INFO [RS:0;1a6e40b21a48:38273 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-17T22:50:40,498 INFO [RS:0;1a6e40b21a48:38273 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-17T22:50:40,534 WARN [1a6e40b21a48:44073 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-17T22:50:40,577 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	...
11 more 2024-11-17T22:50:40,601 INFO [RS:0;1a6e40b21a48:38273 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a6e40b21a48%2C38273%2C1731883840216, suffix=, logDir=hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/WALs/1a6e40b21a48,38273,1731883840216, archiveDir=hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/oldWALs, maxLogs=32 2024-11-17T22:50:40,602 INFO [RS:0;1a6e40b21a48:38273 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C38273%2C1731883840216.1731883840602 2024-11-17T22:50:40,611 INFO [RS:0;1a6e40b21a48:38273 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/WALs/1a6e40b21a48,38273,1731883840216/1a6e40b21a48%2C38273%2C1731883840216.1731883840602 2024-11-17T22:50:40,613 DEBUG [RS:0;1a6e40b21a48:38273 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33051:33051),(127.0.0.1/127.0.0.1:43459:43459)] 2024-11-17T22:50:40,785 DEBUG [1a6e40b21a48:44073 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-17T22:50:40,786 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1a6e40b21a48,38273,1731883840216 2024-11-17T22:50:40,789 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1a6e40b21a48,38273,1731883840216, state=OPENING 2024-11-17T22:50:40,791 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-17T22:50:40,794 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38273-0x1004fe18f090001, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:50:40,794 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44073-0x1004fe18f090000, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:50:40,795 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T22:50:40,795 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=1a6e40b21a48,38273,1731883840216}] 2024-11-17T22:50:40,795 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T22:50:40,795 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T22:50:40,950 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-17T22:50:40,953 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43593, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-17T22:50:40,960 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, 
pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-17T22:50:40,960 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T22:50:40,964 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a6e40b21a48%2C38273%2C1731883840216.meta, suffix=.meta, logDir=hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/WALs/1a6e40b21a48,38273,1731883840216, archiveDir=hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/oldWALs, maxLogs=32 2024-11-17T22:50:40,965 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a6e40b21a48%2C38273%2C1731883840216.meta.1731883840964.meta 2024-11-17T22:50:40,969 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/WALs/1a6e40b21a48,38273,1731883840216/1a6e40b21a48%2C38273%2C1731883840216.meta.1731883840964.meta 2024-11-17T22:50:40,972 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33051:33051),(127.0.0.1/127.0.0.1:43459:43459)] 2024-11-17T22:50:40,978 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-17T22:50:40,978 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-17T22:50:40,978 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-17T22:50:40,978 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-17T22:50:40,978 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-17T22:50:40,978 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T22:50:40,978 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-17T22:50:40,979 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-17T22:50:40,980 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T22:50:40,981 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T22:50:40,981 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:50:40,982 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:50:40,982 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T22:50:40,982 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T22:50:40,982 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:50:40,983 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:50:40,983 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T22:50:40,983 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T22:50:40,983 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:50:40,984 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T22:50:40,984 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T22:50:40,984 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T22:50:40,984 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T22:50:40,985 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-17T22:50:40,985 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T22:50:40,985 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/data/hbase/meta/1588230740 2024-11-17T22:50:40,986 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/data/hbase/meta/1588230740 2024-11-17T22:50:40,987 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T22:50:40,987 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T22:50:40,988 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-17T22:50:40,988 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T22:50:40,989 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=737841, jitterRate=-0.06178778409957886}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T22:50:40,989 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-17T22:50:40,990 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731883840979Writing region info on filesystem at 1731883840979Initializing all the Stores at 1731883840979Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883840979Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883840980 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731883840980Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731883840980Cleaning up temporary data from old regions at 1731883840987 (+7 ms)Running coprocessor post-open hooks at 1731883840989 (+2 ms)Region opened successfully at 1731883840990 (+1 ms) 2024-11-17T22:50:40,991 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731883840950 2024-11-17T22:50:40,993 DEBUG [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-17T22:50:40,993 INFO [RS_OPEN_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-17T22:50:40,993 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=1a6e40b21a48,38273,1731883840216 2024-11-17T22:50:40,994 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1a6e40b21a48,38273,1731883840216, state=OPEN 2024-11-17T22:50:40,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44073-0x1004fe18f090000, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T22:50:40,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38273-0x1004fe18f090001, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T22:50:40,996 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=1a6e40b21a48,38273,1731883840216 2024-11-17T22:50:40,996 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T22:50:40,996 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T22:50:40,999 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-17T22:50:40,999 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=1a6e40b21a48,38273,1731883840216 in 201 msec 2024-11-17T22:50:41,001 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-17T22:50:41,002 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 616 msec 2024-11-17T22:50:41,002 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T22:50:41,002 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-17T22:50:41,004 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T22:50:41,004 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a6e40b21a48,38273,1731883840216, seqNum=-1] 2024-11-17T22:50:41,004 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T22:50:41,005 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54787, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T22:50:41,011 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 659 msec 2024-11-17T22:50:41,011 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731883841011, completionTime=-1 2024-11-17T22:50:41,011 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-17T22:50:41,011 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-17T22:50:41,013 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-17T22:50:41,013 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731883901013 2024-11-17T22:50:41,013 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731883961013 2024-11-17T22:50:41,014 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-17T22:50:41,014 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,44073,1731883840176-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T22:50:41,014 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,44073,1731883840176-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:50:41,014 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,44073,1731883840176-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T22:50:41,014 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-1a6e40b21a48:44073, period=300000, unit=MILLISECONDS is enabled. 
2024-11-17T22:50:41,014 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-17T22:50:41,014 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-17T22:50:41,016 DEBUG [master/1a6e40b21a48:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-17T22:50:41,019 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.772sec 2024-11-17T22:50:41,019 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-17T22:50:41,019 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-17T22:50:41,019 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-17T22:50:41,019 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-17T22:50:41,019 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-17T22:50:41,019 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,44073,1731883840176-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T22:50:41,019 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,44073,1731883840176-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-17T22:50:41,022 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-17T22:50:41,022 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-17T22:50:41,022 INFO [master/1a6e40b21a48:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a6e40b21a48,44073,1731883840176-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-17T22:50:41,035 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f95849a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T22:50:41,035 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 1a6e40b21a48,44073,-1 for getting cluster id 2024-11-17T22:50:41,035 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-17T22:50:41,037 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '246b3557-ad9c-4c2b-878b-a56139c800f6' 2024-11-17T22:50:41,037 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-17T22:50:41,037 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "246b3557-ad9c-4c2b-878b-a56139c800f6" 2024-11-17T22:50:41,038 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d7b3a70, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T22:50:41,038 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a6e40b21a48,44073,-1] 2024-11-17T22:50:41,038 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-17T22:50:41,038 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:50:41,039 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45910, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-17T22:50:41,040 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ae1ce13, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T22:50:41,041 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T22:50:41,042 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a6e40b21a48,38273,1731883840216, seqNum=-1] 2024-11-17T22:50:41,043 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T22:50:41,044 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47282, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T22:50:41,047 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=1a6e40b21a48,44073,1731883840176 2024-11-17T22:50:41,047 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T22:50:41,050 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-17T22:50:41,050 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T22:50:41,053 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/WALs/test.com,8080,1, archiveDir=hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/oldWALs, maxLogs=32 2024-11-17T22:50:41,053 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731883841053 2024-11-17T22:50:41,058 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/WALs/test.com,8080,1/test.com%2C8080%2C1.1731883841053 2024-11-17T22:50:41,059 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43459:43459),(127.0.0.1/127.0.0.1:33051:33051)] 2024-11-17T22:50:41,060 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731883841060 2024-11-17T22:50:41,068 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:41,068 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:41,068 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:41,068 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:41,068 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:41,068 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/WALs/test.com,8080,1/test.com%2C8080%2C1.1731883841053 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/WALs/test.com,8080,1/test.com%2C8080%2C1.1731883841060 2024-11-17T22:50:41,069 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43459:43459),(127.0.0.1/127.0.0.1:33051:33051)] 2024-11-17T22:50:41,069 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/WALs/test.com,8080,1/test.com%2C8080%2C1.1731883841053 is not closed yet, will try archiving it next time 2024-11-17T22:50:41,070 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:41,070 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:41,070 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:41,070 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:41,070 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:41,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34081 is added to blk_1073741835_1011 (size=93) 2024-11-17T22:50:41,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40435 is added to blk_1073741835_1011 (size=93) 2024-11-17T22:50:41,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34081 is added to blk_1073741836_1012 (size=93) 
2024-11-17T22:50:41,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40435 is added to blk_1073741836_1012 (size=93) 2024-11-17T22:50:41,072 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/WALs/test.com,8080,1/test.com%2C8080%2C1.1731883841053 to hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/oldWALs/test.com%2C8080%2C1.1731883841053 2024-11-17T22:50:41,074 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/oldWALs 2024-11-17T22:50:41,074 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731883841060) 2024-11-17T22:50:41,074 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-17T22:50:41,074 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-17T22:50:41,074 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at 
org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T22:50:41,074 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:50:41,074 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:50:41,074 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-17T22:50:41,074 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-17T22:50:41,074 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1478412634, stopped=false 2024-11-17T22:50:41,074 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=1a6e40b21a48,44073,1731883840176 2024-11-17T22:50:41,075 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38273-0x1004fe18f090001, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T22:50:41,075 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44073-0x1004fe18f090000, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T22:50:41,075 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44073-0x1004fe18f090000, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:50:41,075 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38273-0x1004fe18f090001, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:50:41,076 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T22:50:41,076 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-17T22:50:41,076 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44073-0x1004fe18f090000, quorum=127.0.0.1:63796, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T22:50:41,076 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T22:50:41,076 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:50:41,076 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38273-0x1004fe18f090001, quorum=127.0.0.1:63796, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T22:50:41,076 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '1a6e40b21a48,38273,1731883840216' ***** 2024-11-17T22:50:41,076 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-17T22:50:41,076 INFO [RS:0;1a6e40b21a48:38273 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-17T22:50:41,076 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-17T22:50:41,076 INFO [RS:0;1a6e40b21a48:38273 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-17T22:50:41,076 INFO [RS:0;1a6e40b21a48:38273 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-17T22:50:41,076 INFO [RS:0;1a6e40b21a48:38273 {}] regionserver.HRegionServer(959): stopping server 1a6e40b21a48,38273,1731883840216 2024-11-17T22:50:41,077 INFO [RS:0;1a6e40b21a48:38273 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T22:50:41,077 INFO [RS:0;1a6e40b21a48:38273 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;1a6e40b21a48:38273. 2024-11-17T22:50:41,077 DEBUG [RS:0;1a6e40b21a48:38273 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T22:50:41,077 DEBUG [RS:0;1a6e40b21a48:38273 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:50:41,077 INFO [RS:0;1a6e40b21a48:38273 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-17T22:50:41,077 INFO [RS:0;1a6e40b21a48:38273 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-17T22:50:41,077 INFO [RS:0;1a6e40b21a48:38273 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-17T22:50:41,077 INFO [RS:0;1a6e40b21a48:38273 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-17T22:50:41,077 INFO [RS:0;1a6e40b21a48:38273 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-17T22:50:41,077 DEBUG [RS:0;1a6e40b21a48:38273 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-17T22:50:41,077 DEBUG [RS:0;1a6e40b21a48:38273 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-17T22:50:41,077 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T22:50:41,077 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T22:50:41,077 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T22:50:41,077 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T22:50:41,077 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T22:50:41,077 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-17T22:50:41,093 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/data/hbase/meta/1588230740/.tmp/ns/27479e4d055e44439e7948cac0a0f7ce is 43, key is default/ns:d/1731883841006/Put/seqid=0 2024-11-17T22:50:41,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40435 is added to blk_1073741837_1013 (size=5153) 2024-11-17T22:50:41,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34081 is added to blk_1073741837_1013 (size=5153) 2024-11-17T22:50:41,098 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/data/hbase/meta/1588230740/.tmp/ns/27479e4d055e44439e7948cac0a0f7ce 2024-11-17T22:50:41,103 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/data/hbase/meta/1588230740/.tmp/ns/27479e4d055e44439e7948cac0a0f7ce as hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/data/hbase/meta/1588230740/ns/27479e4d055e44439e7948cac0a0f7ce 2024-11-17T22:50:41,108 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/data/hbase/meta/1588230740/ns/27479e4d055e44439e7948cac0a0f7ce, entries=2, sequenceid=6, filesize=5.0 K 2024-11-17T22:50:41,109 INFO 
[RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 32ms, sequenceid=6, compaction requested=false 2024-11-17T22:50:41,112 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-17T22:50:41,113 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T22:50:41,113 INFO [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T22:50:41,113 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731883841077Running coprocessor pre-close hooks at 1731883841077Disabling compacts and flushes for region at 1731883841077Disabling writes for close at 1731883841077Obtaining lock to block concurrent updates at 1731883841077Preparing flush snapshotting stores in 1588230740 at 1731883841077Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731883841078 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731883841078Flushing 1588230740/ns: creating writer at 1731883841078Flushing 1588230740/ns: appending metadata at 1731883841093 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731883841093Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@46b56794: reopening flushed file at 1731883841102 (+9 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 32ms, sequenceid=6, compaction requested=false at 1731883841109 (+7 ms)Writing region close event to WAL at 1731883841109Running coprocessor post-close hooks at 1731883841113 (+4 ms)Closed at 1731883841113 2024-11-17T22:50:41,113 DEBUG [RS_CLOSE_META-regionserver/1a6e40b21a48:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-17T22:50:41,164 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,45593,1731883664234/1a6e40b21a48%2C45593%2C1731883664234.meta.1731883665049.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:41,164 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/WALs/1a6e40b21a48,43411,1731883665200/1a6e40b21a48%2C43411%2C1731883665200.1731883665392 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T22:50:41,277 INFO [RS:0;1a6e40b21a48:38273 {}] regionserver.HRegionServer(976): stopping server 1a6e40b21a48,38273,1731883840216; all regions closed. 2024-11-17T22:50:41,279 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:41,279 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:41,279 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:41,280 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:41,280 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:41,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40435 is added to blk_1073741834_1010 (size=1152) 2024-11-17T22:50:41,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34081 is added to blk_1073741834_1010 (size=1152) 2024-11-17T22:50:41,288 DEBUG [RS:0;1a6e40b21a48:38273 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/oldWALs 2024-11-17T22:50:41,289 INFO [RS:0;1a6e40b21a48:38273 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1a6e40b21a48%2C38273%2C1731883840216.meta:.meta(num 1731883840964) 2024-11-17T22:50:41,289 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:41,289 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:41,289 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:41,290 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:41,290 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T22:50:41,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40435 is added to blk_1073741833_1009 (size=93) 2024-11-17T22:50:41,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34081 is added to blk_1073741833_1009 (size=93) 2024-11-17T22:50:41,294 DEBUG [RS:0;1a6e40b21a48:38273 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/oldWALs 2024-11-17T22:50:41,294 INFO [RS:0;1a6e40b21a48:38273 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1a6e40b21a48%2C38273%2C1731883840216:(num 1731883840602) 2024-11-17T22:50:41,294 DEBUG [RS:0;1a6e40b21a48:38273 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T22:50:41,294 INFO [RS:0;1a6e40b21a48:38273 {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T22:50:41,294 INFO [RS:0;1a6e40b21a48:38273 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T22:50:41,294 INFO [RS:0;1a6e40b21a48:38273 {}] hbase.ChoreService(370): Chore service for: regionserver/1a6e40b21a48:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, 
unit=MILLISECONDS] on shutdown 2024-11-17T22:50:41,294 INFO [RS:0;1a6e40b21a48:38273 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T22:50:41,294 INFO [regionserver/1a6e40b21a48:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-17T22:50:41,294 INFO [RS:0;1a6e40b21a48:38273 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38273 2024-11-17T22:50:41,296 INFO [RS:0;1a6e40b21a48:38273 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T22:50:41,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44073-0x1004fe18f090000, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T22:50:41,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38273-0x1004fe18f090001, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1a6e40b21a48,38273,1731883840216 2024-11-17T22:50:41,296 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1a6e40b21a48,38273,1731883840216] 2024-11-17T22:50:41,297 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1a6e40b21a48,38273,1731883840216 already deleted, retry=false 2024-11-17T22:50:41,297 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1a6e40b21a48,38273,1731883840216 expired; onlineServers=0 2024-11-17T22:50:41,297 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '1a6e40b21a48,44073,1731883840176' ***** 2024-11-17T22:50:41,297 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-17T22:50:41,297 INFO [M:0;1a6e40b21a48:44073 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T22:50:41,297 INFO [M:0;1a6e40b21a48:44073 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T22:50:41,297 DEBUG [M:0;1a6e40b21a48:44073 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-17T22:50:41,297 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-17T22:50:41,297 DEBUG [M:0;1a6e40b21a48:44073 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-17T22:50:41,297 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.small.0-1731883840355 {}] cleaner.HFileCleaner(306): Exit Thread[master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.small.0-1731883840355,5,FailOnTimeoutGroup] 2024-11-17T22:50:41,297 DEBUG [master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.large.0-1731883840355 {}] cleaner.HFileCleaner(306): Exit Thread[master/1a6e40b21a48:0:becomeActiveMaster-HFileCleaner.large.0-1731883840355,5,FailOnTimeoutGroup] 2024-11-17T22:50:41,297 INFO [M:0;1a6e40b21a48:44073 {}] hbase.ChoreService(370): Chore service for: master/1a6e40b21a48:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-17T22:50:41,298 INFO [M:0;1a6e40b21a48:44073 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T22:50:41,298 DEBUG [M:0;1a6e40b21a48:44073 {}] master.HMaster(1795): Stopping service threads 2024-11-17T22:50:41,298 INFO [M:0;1a6e40b21a48:44073 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-17T22:50:41,298 INFO [M:0;1a6e40b21a48:44073 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T22:50:41,298 INFO [M:0;1a6e40b21a48:44073 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-17T22:50:41,298 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-17T22:50:41,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44073-0x1004fe18f090000, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-17T22:50:41,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44073-0x1004fe18f090000, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T22:50:41,298 DEBUG [M:0;1a6e40b21a48:44073 {}] zookeeper.ZKUtil(347): master:44073-0x1004fe18f090000, quorum=127.0.0.1:63796, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-17T22:50:41,298 WARN [M:0;1a6e40b21a48:44073 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-17T22:50:41,299 INFO [M:0;1a6e40b21a48:44073 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/.lastflushedseqids 2024-11-17T22:50:41,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34081 is added to blk_1073741838_1014 (size=99) 2024-11-17T22:50:41,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40435 is added to blk_1073741838_1014 (size=99) 2024-11-17T22:50:41,304 INFO [M:0;1a6e40b21a48:44073 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-17T22:50:41,304 INFO [M:0;1a6e40b21a48:44073 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-17T22:50:41,304 DEBUG [M:0;1a6e40b21a48:44073 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T22:50:41,304 INFO [M:0;1a6e40b21a48:44073 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:50:41,304 DEBUG [M:0;1a6e40b21a48:44073 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:50:41,304 DEBUG [M:0;1a6e40b21a48:44073 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T22:50:41,304 DEBUG [M:0;1a6e40b21a48:44073 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T22:50:41,304 INFO [M:0;1a6e40b21a48:44073 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-17T22:50:41,319 DEBUG [M:0;1a6e40b21a48:44073 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cc698d6f567a4f19bd7e17da3df2f054 is 82, key is hbase:meta,,1/info:regioninfo/1731883840993/Put/seqid=0 2024-11-17T22:50:41,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40435 is added to blk_1073741839_1015 (size=5672) 2024-11-17T22:50:41,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34081 is added to blk_1073741839_1015 (size=5672) 2024-11-17T22:50:41,324 INFO [M:0;1a6e40b21a48:44073 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cc698d6f567a4f19bd7e17da3df2f054 2024-11-17T22:50:41,340 DEBUG [M:0;1a6e40b21a48:44073 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8ba5a051ac034433915c60728b338bac is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731883841010/Put/seqid=0 2024-11-17T22:50:41,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40435 is added to blk_1073741840_1016 (size=5275) 2024-11-17T22:50:41,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34081 is added to blk_1073741840_1016 (size=5275) 2024-11-17T22:50:41,345 INFO [M:0;1a6e40b21a48:44073 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8ba5a051ac034433915c60728b338bac 2024-11-17T22:50:41,368 DEBUG [M:0;1a6e40b21a48:44073 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/106d1e1006ad42718bb5f17ad6409569 is 69, key is 1a6e40b21a48,38273,1731883840216/rs:state/1731883840459/Put/seqid=0 2024-11-17T22:50:41,372 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40435 is added to blk_1073741841_1017 (size=5156) 2024-11-17T22:50:41,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34081 is added to blk_1073741841_1017 (size=5156) 2024-11-17T22:50:41,373 INFO [M:0;1a6e40b21a48:44073 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/106d1e1006ad42718bb5f17ad6409569 2024-11-17T22:50:41,391 DEBUG [M:0;1a6e40b21a48:44073 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/bca1be0b5e8e40f699e455bcd060eaef is 52, key is load_balancer_on/state:d/1731883841049/Put/seqid=0 2024-11-17T22:50:41,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40435 is added to blk_1073741842_1018 (size=5056) 2024-11-17T22:50:41,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34081 is added to blk_1073741842_1018 (size=5056) 2024-11-17T22:50:41,395 INFO [M:0;1a6e40b21a48:44073 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/bca1be0b5e8e40f699e455bcd060eaef 2024-11-17T22:50:41,396 INFO [RS:0;1a6e40b21a48:38273 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T22:50:41,397 INFO [RS:0;1a6e40b21a48:38273 {}] regionserver.HRegionServer(1031): Exiting; stopping=1a6e40b21a48,38273,1731883840216; zookeeper connection closed. 
2024-11-17T22:50:41,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38273-0x1004fe18f090001, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-17T22:50:41,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38273-0x1004fe18f090001, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-17T22:50:41,397 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4c8258dc {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4c8258dc
2024-11-17T22:50:41,397 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-11-17T22:50:41,400 DEBUG [M:0;1a6e40b21a48:44073 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cc698d6f567a4f19bd7e17da3df2f054 as hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cc698d6f567a4f19bd7e17da3df2f054
2024-11-17T22:50:41,404 INFO [M:0;1a6e40b21a48:44073 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cc698d6f567a4f19bd7e17da3df2f054, entries=8, sequenceid=29, filesize=5.5 K
2024-11-17T22:50:41,405 DEBUG [M:0;1a6e40b21a48:44073 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8ba5a051ac034433915c60728b338bac as hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8ba5a051ac034433915c60728b338bac
2024-11-17T22:50:41,409 INFO [M:0;1a6e40b21a48:44073 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8ba5a051ac034433915c60728b338bac, entries=3, sequenceid=29, filesize=5.2 K
2024-11-17T22:50:41,410 DEBUG [M:0;1a6e40b21a48:44073 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/106d1e1006ad42718bb5f17ad6409569 as hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/106d1e1006ad42718bb5f17ad6409569
2024-11-17T22:50:41,413 INFO [M:0;1a6e40b21a48:44073 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/106d1e1006ad42718bb5f17ad6409569, entries=1, sequenceid=29, filesize=5.0 K
2024-11-17T22:50:41,414 DEBUG [M:0;1a6e40b21a48:44073 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/bca1be0b5e8e40f699e455bcd060eaef as hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/bca1be0b5e8e40f699e455bcd060eaef
2024-11-17T22:50:41,417 INFO [M:0;1a6e40b21a48:44073 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46873/user/jenkins/test-data/e028f2de-5d93-b50e-c368-d21518241fb7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/bca1be0b5e8e40f699e455bcd060eaef, entries=1, sequenceid=29, filesize=4.9 K
2024-11-17T22:50:41,418 INFO [M:0;1a6e40b21a48:44073 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 114ms, sequenceid=29, compaction requested=false
2024-11-17T22:50:41,420 INFO [M:0;1a6e40b21a48:44073 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-17T22:50:41,420 DEBUG [M:0;1a6e40b21a48:44073 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731883841304Disabling compacts and flushes for region at 1731883841304Disabling writes for close at 1731883841304Obtaining lock to block concurrent updates at 1731883841304Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731883841304Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731883841305 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731883841305Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731883841305Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731883841319 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731883841319Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731883841327 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731883841340 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731883841340Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731883841349 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731883841367 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731883841367Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731883841376 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731883841391 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731883841391Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7cf4b6e3: reopening flushed file at 1731883841399 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6cc00a14: reopening flushed file at 1731883841404 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@15e1f41b: reopening flushed file at 1731883841409 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@15ea380e: reopening flushed file at 1731883841413 (+4 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 114ms, sequenceid=29, compaction requested=false at 1731883841418 (+5 ms)Writing region close event to WAL at 1731883841420 (+2 ms)Closed at 1731883841420
2024-11-17T22:50:41,420 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T22:50:41,421 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T22:50:41,421 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T22:50:41,421 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T22:50:41,421 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T22:50:41,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34081 is added to blk_1073741830_1006 (size=10311)
2024-11-17T22:50:41,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40435 is added to blk_1073741830_1006 (size=10311)
2024-11-17T22:50:41,423 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-17T22:50:41,423 INFO [M:0;1a6e40b21a48:44073 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-17T22:50:41,423 INFO [M:0;1a6e40b21a48:44073 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44073
2024-11-17T22:50:41,423 INFO [M:0;1a6e40b21a48:44073 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-17T22:50:41,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44073-0x1004fe18f090000, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-17T22:50:41,525 INFO [M:0;1a6e40b21a48:44073 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-17T22:50:41,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44073-0x1004fe18f090000, quorum=127.0.0.1:63796, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-17T22:50:41,531 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@352a3917{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-17T22:50:41,531 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@349fe29e{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-17T22:50:41,531 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-17T22:50:41,531 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@377e4a58{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-17T22:50:41,532 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7e09407e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/hadoop.log.dir/,STOPPED}
2024-11-17T22:50:41,533 WARN [BP-930226705-172.17.0.2-1731883839589 heartbeating to localhost/127.0.0.1:46873 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-17T22:50:41,533 WARN [BP-930226705-172.17.0.2-1731883839589 heartbeating to localhost/127.0.0.1:46873 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-930226705-172.17.0.2-1731883839589 (Datanode Uuid 4d936ee7-7c32-430e-adb6-87b9b20917aa) service to localhost/127.0.0.1:46873
2024-11-17T22:50:41,533 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-17T22:50:41,533 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-17T22:50:41,533 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/cluster_ff8c3934-7edd-8bdd-a48a-6ec929cde6c4/data/data3/current/BP-930226705-172.17.0.2-1731883839589 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-17T22:50:41,533 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/cluster_ff8c3934-7edd-8bdd-a48a-6ec929cde6c4/data/data4/current/BP-930226705-172.17.0.2-1731883839589 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-17T22:50:41,533 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-17T22:50:41,539 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5bfccd5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-17T22:50:41,539 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@444363ed{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-17T22:50:41,539 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-17T22:50:41,540 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d0f6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-17T22:50:41,540 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c3c893{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/hadoop.log.dir/,STOPPED}
2024-11-17T22:50:41,541 WARN [BP-930226705-172.17.0.2-1731883839589 heartbeating to localhost/127.0.0.1:46873 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-17T22:50:41,541 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-17T22:50:41,541 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-17T22:50:41,541 WARN [BP-930226705-172.17.0.2-1731883839589 heartbeating to localhost/127.0.0.1:46873 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-930226705-172.17.0.2-1731883839589 (Datanode Uuid 26aa8420-78a0-4cd1-a3f4-0d30864b4921) service to localhost/127.0.0.1:46873
2024-11-17T22:50:41,541 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/cluster_ff8c3934-7edd-8bdd-a48a-6ec929cde6c4/data/data1/current/BP-930226705-172.17.0.2-1731883839589 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-17T22:50:41,542 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/cluster_ff8c3934-7edd-8bdd-a48a-6ec929cde6c4/data/data2/current/BP-930226705-172.17.0.2-1731883839589 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-17T22:50:41,542 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-17T22:50:41,550 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2394ff19{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-17T22:50:41,551 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@34b79769{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-17T22:50:41,551 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-17T22:50:41,551 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@31a449e1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-17T22:50:41,551 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@251f308d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ba6b193d-c369-45f2-ccf1-a69a64011c9e/hadoop.log.dir/,STOPPED}
2024-11-17T22:50:41,556 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-17T22:50:41,571 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-17T22:50:41,577 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39901/user/jenkins/test-data/59cc98d4-e024-d532-9de6-0e59c5ece048/MasterData/WALs/1a6e40b21a48,44599,1731883664183/1a6e40b21a48%2C44599%2C1731883664183.1731883664335
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-17T22:50:41,582 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=270 (was 230)
Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:46873 from jenkins.hfs.7
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-45-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46873
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: globalEventExecutor-1-21
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113)
    app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:46873 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-42-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:46873
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:46873 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46873
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:46873
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46873
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
- Thread LEAK? -, OpenFileDescriptor=541 (was 518) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=99 (was 99), ProcessCount=11 (was 11), AvailableMemoryMB=3953 (was 3967)