2024-11-08 00:48:21,480 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@6f204a1a 2024-11-08 00:48:21,491 main DEBUG Took 0.009535 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-08 00:48:21,491 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-08 00:48:21,492 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-08 00:48:21,493 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-08 00:48:21,494 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 00:48:21,510 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-08 00:48:21,526 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 00:48:21,528 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 00:48:21,529 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 00:48:21,529 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 00:48:21,530 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 00:48:21,530 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 00:48:21,531 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 00:48:21,531 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 00:48:21,531 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 00:48:21,532 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 00:48:21,533 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 00:48:21,533 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 00:48:21,533 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 00:48:21,534 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-08 00:48:21,534 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 00:48:21,534 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 00:48:21,535 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 00:48:21,535 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 00:48:21,536 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 00:48:21,536 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 00:48:21,536 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 00:48:21,537 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 00:48:21,537 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 00:48:21,537 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 00:48:21,538 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 00:48:21,538 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-08 00:48:21,540 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 00:48:21,541 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-08 00:48:21,543 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-08 00:48:21,543 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-08 00:48:21,544 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-08 00:48:21,545 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-08 00:48:21,553 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-08 00:48:21,557 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-08 00:48:21,558 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-08 00:48:21,559 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-08 00:48:21,559 main DEBUG createAppenders(={Console}) 2024-11-08 00:48:21,560 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@6f204a1a initialized 2024-11-08 00:48:21,560 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@6f204a1a 2024-11-08 00:48:21,561 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@6f204a1a OK. 2024-11-08 00:48:21,561 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-08 00:48:21,562 main DEBUG OutputStream closed 2024-11-08 00:48:21,562 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-08 00:48:21,562 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-08 00:48:21,562 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@21e360a OK 2024-11-08 00:48:21,644 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-08 00:48:21,647 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-08 00:48:21,649 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-08 00:48:21,650 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-08 00:48:21,651 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-08 00:48:21,651 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-08 00:48:21,651 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-08 00:48:21,652 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-08 00:48:21,652 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-08 00:48:21,653 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-08 00:48:21,653 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-08 00:48:21,654 main DEBUG Registering MBean 
org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-08 00:48:21,654 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-08 00:48:21,655 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-08 00:48:21,655 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-08 00:48:21,655 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-08 00:48:21,656 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-08 00:48:21,657 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-08 00:48:21,659 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-08 00:48:21,659 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@42b02722) with optional ClassLoader: null 2024-11-08 00:48:21,660 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-08 00:48:21,661 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@42b02722] started OK. 2024-11-08T00:48:21,677 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle timeout: 13 mins 2024-11-08 00:48:21,681 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-08 00:48:21,682 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
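The HBaseClassTestRule line above reports a 13-minute class timeout for org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle. A minimal sketch of the test-class skeleton that typically produces such a line, assuming the usual HBase layout (the @Category value and the class body are illustrative assumptions, not taken from this log):

```java
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.junit.ClassRule;
import org.junit.experimental.categories.Category;

@Category(LargeTests.class) // assumed category; large tests get the longer class timeout
public class TestClusterScopeQuotaThrottle {

  // The class rule enforces the per-class timeout reported in the log
  // (13 minutes here), derived from the test's size category.
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestClusterScopeQuotaThrottle.class);

  // ... test methods exercising cluster-scope quota throttling ...
}
```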
2024-11-08T00:48:21,981 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be 2024-11-08T00:48:21,999 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=2, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-08T00:48:22,019 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/cluster_12317072-aa6d-428e-8711-1fc6d7e41e59, deleteOnExit=true 2024-11-08T00:48:22,019 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-08T00:48:22,021 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/test.cache.data in system properties and HBase conf 2024-11-08T00:48:22,022 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/hadoop.tmp.dir in system properties and HBase conf 2024-11-08T00:48:22,022 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/hadoop.log.dir in system properties and HBase conf 2024-11-08T00:48:22,023 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-08T00:48:22,024 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-08T00:48:22,024 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-08T00:48:22,149 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-08T00:48:22,293 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-08T00:48:22,300 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-08T00:48:22,301 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-08T00:48:22,302 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-08T00:48:22,302 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-08T00:48:22,303 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-08T00:48:22,303 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-08T00:48:22,304 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-08T00:48:22,304 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-08T00:48:22,312 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-08T00:48:22,312 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/nfs.dump.dir in system properties and HBase conf 2024-11-08T00:48:22,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/java.io.tmpdir in system properties and HBase conf 2024-11-08T00:48:22,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-08T00:48:22,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-08T00:48:22,315 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-08T00:48:23,257 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-08T00:48:23,347 INFO [Time-limited test {}] log.Log(170): Logging initialized @2605ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-08T00:48:23,434 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:48:23,495 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T00:48:23,518 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T00:48:23,519 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T00:48:23,520 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-08T00:48:23,537 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:48:23,539 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4d649244{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/hadoop.log.dir/,AVAILABLE} 2024-11-08T00:48:23,540 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3b1a9f49{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T00:48:23,758 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@300f56e1{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/java.io.tmpdir/jetty-localhost-38023-hadoop-hdfs-3_4_1-tests_jar-_-any-7463670907310094244/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-08T00:48:23,765 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@51edec43{HTTP/1.1, (http/1.1)}{localhost:38023} 2024-11-08T00:48:23,765 INFO [Time-limited test {}] server.Server(415): Started @3024ms 2024-11-08T00:48:24,171 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:48:24,178 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T00:48:24,179 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T00:48:24,179 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T00:48:24,179 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-08T00:48:24,180 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7826ba03{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/hadoop.log.dir/,AVAILABLE} 2024-11-08T00:48:24,181 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f308f72{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T00:48:24,306 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@286f0a0e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/java.io.tmpdir/jetty-localhost-41601-hadoop-hdfs-3_4_1-tests_jar-_-any-2939770108512212590/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:48:24,307 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@7cda2dcd{HTTP/1.1, (http/1.1)}{localhost:41601} 2024-11-08T00:48:24,307 INFO [Time-limited test {}] server.Server(415): Started @3566ms 2024-11-08T00:48:24,364 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T00:48:24,485 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:48:24,491 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T00:48:24,496 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T00:48:24,496 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T00:48:24,496 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-08T00:48:24,498 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@15f32f09{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/hadoop.log.dir/,AVAILABLE} 2024-11-08T00:48:24,498 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@d84e403{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T00:48:24,626 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@47a5f093{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/java.io.tmpdir/jetty-localhost-37075-hadoop-hdfs-3_4_1-tests_jar-_-any-10390308533393961953/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:48:24,627 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@17ecf0d5{HTTP/1.1, (http/1.1)}{localhost:37075} 2024-11-08T00:48:24,627 INFO [Time-limited test {}] server.Server(415): Started @3886ms 2024-11-08T00:48:24,631 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
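The StartMiniClusterOption{numMasters=1, numRegionServers=2, numDataNodes=2, numZkServers=1} logged by HBaseTestingUtil above is built and passed in from the test's setup code. A minimal sketch under that assumption, using the standard HBaseTestingUtil/StartMiniClusterOption API (the surrounding class and method names are hypothetical):

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSetupSketch {
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  static void setUpCluster() throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)        // one HMaster
        .numRegionServers(2)  // two RegionServers
        .numDataNodes(2)      // two HDFS DataNodes backing the cluster
        .numZkServers(1)      // single-node MiniZooKeeperCluster
        .build();
    // Starts DFS, ZooKeeper, the master and the regionservers, producing the
    // "STARTING DFS", MiniZooKeeperCluster and HMaster lines seen in this log.
    TEST_UTIL.startMiniCluster(option);
  }

  static void tearDownCluster() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }
}
```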
2024-11-08T00:48:24,863 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/cluster_12317072-aa6d-428e-8711-1fc6d7e41e59/data/data2/current/BP-1124388816-172.17.0.2-1731026902973/current, will proceed with Du for space computation calculation, 2024-11-08T00:48:24,863 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/cluster_12317072-aa6d-428e-8711-1fc6d7e41e59/data/data1/current/BP-1124388816-172.17.0.2-1731026902973/current, will proceed with Du for space computation calculation, 2024-11-08T00:48:24,863 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/cluster_12317072-aa6d-428e-8711-1fc6d7e41e59/data/data4/current/BP-1124388816-172.17.0.2-1731026902973/current, will proceed with Du for space computation calculation, 2024-11-08T00:48:24,863 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/cluster_12317072-aa6d-428e-8711-1fc6d7e41e59/data/data3/current/BP-1124388816-172.17.0.2-1731026902973/current, will proceed with Du for space computation calculation, 2024-11-08T00:48:24,922 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-08T00:48:24,923 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-08T00:48:24,999 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa8c7a839d9509689 with lease ID 0xc142c99d6b51d962: Processing first storage report for DS-1215f7db-7ec6-4ee8-964e-f978599b7c57 from datanode DatanodeRegistration(127.0.0.1:33807, datanodeUuid=52ce8828-7352-4067-b8d2-b05ef156c26e, infoPort=34211, infoSecurePort=0, ipcPort=34133, storageInfo=lv=-57;cid=testClusterID;nsid=1948285554;c=1731026902973) 2024-11-08T00:48:25,001 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa8c7a839d9509689 with lease ID 0xc142c99d6b51d962: from storage DS-1215f7db-7ec6-4ee8-964e-f978599b7c57 node DatanodeRegistration(127.0.0.1:33807, datanodeUuid=52ce8828-7352-4067-b8d2-b05ef156c26e, infoPort=34211, infoSecurePort=0, ipcPort=34133, storageInfo=lv=-57;cid=testClusterID;nsid=1948285554;c=1731026902973), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-08T00:48:25,001 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x94e46eb7adb75de with lease ID 0xc142c99d6b51d961: Processing first storage report for DS-b4ee67b5-eafb-46f1-9643-b58926c54623 from datanode DatanodeRegistration(127.0.0.1:44001, datanodeUuid=94c00e7b-aaee-46ce-8dc1-e851c27a3c97, infoPort=42267, infoSecurePort=0, ipcPort=43877, storageInfo=lv=-57;cid=testClusterID;nsid=1948285554;c=1731026902973) 2024-11-08T00:48:25,002 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x94e46eb7adb75de with lease ID 0xc142c99d6b51d961: from storage DS-b4ee67b5-eafb-46f1-9643-b58926c54623 node DatanodeRegistration(127.0.0.1:44001, datanodeUuid=94c00e7b-aaee-46ce-8dc1-e851c27a3c97, infoPort=42267, infoSecurePort=0, ipcPort=43877, storageInfo=lv=-57;cid=testClusterID;nsid=1948285554;c=1731026902973), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-08T00:48:25,002 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa8c7a839d9509689 with lease ID 0xc142c99d6b51d962: Processing first storage report for DS-4838411b-943c-482f-a36a-ea2c44d59d9c from datanode DatanodeRegistration(127.0.0.1:33807, datanodeUuid=52ce8828-7352-4067-b8d2-b05ef156c26e, infoPort=34211, infoSecurePort=0, ipcPort=34133, storageInfo=lv=-57;cid=testClusterID;nsid=1948285554;c=1731026902973) 2024-11-08T00:48:25,002 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa8c7a839d9509689 with lease ID 0xc142c99d6b51d962: from storage DS-4838411b-943c-482f-a36a-ea2c44d59d9c node DatanodeRegistration(127.0.0.1:33807, datanodeUuid=52ce8828-7352-4067-b8d2-b05ef156c26e, infoPort=34211, infoSecurePort=0, ipcPort=34133, storageInfo=lv=-57;cid=testClusterID;nsid=1948285554;c=1731026902973), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:48:25,003 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x94e46eb7adb75de with lease ID 0xc142c99d6b51d961: Processing first storage report for DS-51e9f5a7-efbb-4f8e-b838-345e07dd905b from datanode DatanodeRegistration(127.0.0.1:44001, datanodeUuid=94c00e7b-aaee-46ce-8dc1-e851c27a3c97, infoPort=42267, infoSecurePort=0, ipcPort=43877, storageInfo=lv=-57;cid=testClusterID;nsid=1948285554;c=1731026902973) 2024-11-08T00:48:25,003 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x94e46eb7adb75de with lease ID 0xc142c99d6b51d961: from storage DS-51e9f5a7-efbb-4f8e-b838-345e07dd905b node DatanodeRegistration(127.0.0.1:44001, datanodeUuid=94c00e7b-aaee-46ce-8dc1-e851c27a3c97, infoPort=42267, infoSecurePort=0, ipcPort=43877, storageInfo=lv=-57;cid=testClusterID;nsid=1948285554;c=1731026902973), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-08T00:48:25,050 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be 2024-11-08T00:48:25,133 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/cluster_12317072-aa6d-428e-8711-1fc6d7e41e59/zookeeper_0, clientPort=63994, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/cluster_12317072-aa6d-428e-8711-1fc6d7e41e59/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/cluster_12317072-aa6d-428e-8711-1fc6d7e41e59/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-08T00:48:25,143 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63994 2024-11-08T00:48:25,154 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:48:25,157 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:48:25,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44001 is added to blk_1073741825_1001 (size=7) 2024-11-08T00:48:25,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33807 is added to blk_1073741825_1001 (size=7) 2024-11-08T00:48:25,796 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4 with version=8 2024-11-08T00:48:25,796 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/hbase-staging 2024-11-08T00:48:25,892 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-08T00:48:26,144 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c302dddf724e:0 server-side Connection retries=45 2024-11-08T00:48:26,155 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:48:26,155 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T00:48:26,161 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T00:48:26,161 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:48:26,162 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T00:48:26,311 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-08T00:48:26,372 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-08T00:48:26,383 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-08T00:48:26,387 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T00:48:26,416 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 19581 (auto-detected) 2024-11-08T00:48:26,417 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-08T00:48:26,440 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46149 2024-11-08T00:48:26,463 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46149 connecting to ZooKeeper ensemble=127.0.0.1:63994 2024-11-08T00:48:26,501 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:461490x0, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T00:48:26,504 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46149-0x100f50c52a90000 connected 2024-11-08T00:48:26,547 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:48:26,549 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:48:26,559 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46149-0x100f50c52a90000, quorum=127.0.0.1:63994, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:48:26,563 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4, hbase.cluster.distributed=false 2024-11-08T00:48:26,588 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46149-0x100f50c52a90000, quorum=127.0.0.1:63994, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T00:48:26,595 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46149 
2024-11-08T00:48:26,595 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46149 2024-11-08T00:48:26,596 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46149 2024-11-08T00:48:26,597 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46149 2024-11-08T00:48:26,598 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46149 2024-11-08T00:48:26,723 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c302dddf724e:0 server-side Connection retries=45 2024-11-08T00:48:26,725 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:48:26,725 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T00:48:26,725 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T00:48:26,725 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:48:26,726 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T00:48:26,729 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-08T00:48:26,731 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T00:48:26,732 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44401 2024-11-08T00:48:26,734 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44401 connecting to ZooKeeper ensemble=127.0.0.1:63994 2024-11-08T00:48:26,734 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:48:26,738 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:48:26,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:444010x0, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T00:48:26,746 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:444010x0, quorum=127.0.0.1:63994, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:48:26,747 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): 
regionserver:44401-0x100f50c52a90001 connected 2024-11-08T00:48:26,751 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-08T00:48:26,758 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-08T00:48:26,761 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44401-0x100f50c52a90001, quorum=127.0.0.1:63994, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-08T00:48:26,766 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44401-0x100f50c52a90001, quorum=127.0.0.1:63994, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T00:48:26,766 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44401 2024-11-08T00:48:26,767 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44401 2024-11-08T00:48:26,767 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44401 2024-11-08T00:48:26,768 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44401 2024-11-08T00:48:26,769 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44401 2024-11-08T00:48:26,787 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c302dddf724e:0 server-side Connection retries=45 2024-11-08T00:48:26,787 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:48:26,787 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T00:48:26,788 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T00:48:26,788 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:48:26,788 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T00:48:26,789 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-08T00:48:26,789 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T00:48:26,790 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36935 2024-11-08T00:48:26,792 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36935 connecting to ZooKeeper ensemble=127.0.0.1:63994 2024-11-08T00:48:26,793 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:48:26,795 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:48:26,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:369350x0, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T00:48:26,801 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36935-0x100f50c52a90002 connected 2024-11-08T00:48:26,802 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36935-0x100f50c52a90002, quorum=127.0.0.1:63994, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:48:26,802 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-08T00:48:26,803 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-08T00:48:26,804 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36935-0x100f50c52a90002, quorum=127.0.0.1:63994, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-08T00:48:26,806 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36935-0x100f50c52a90002, quorum=127.0.0.1:63994, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T00:48:26,806 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36935 2024-11-08T00:48:26,807 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36935 2024-11-08T00:48:26,808 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36935 2024-11-08T00:48:26,808 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36935 2024-11-08T00:48:26,809 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36935 2024-11-08T00:48:26,824 DEBUG [M:0;c302dddf724e:46149 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c302dddf724e:46149 2024-11-08T00:48:26,825 INFO [master/c302dddf724e:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c302dddf724e,46149,1731026905945 2024-11-08T00:48:26,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44401-0x100f50c52a90001, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:48:26,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36935-0x100f50c52a90002, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:48:26,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46149-0x100f50c52a90000, 
quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:48:26,835 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46149-0x100f50c52a90000, quorum=127.0.0.1:63994, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c302dddf724e,46149,1731026905945 2024-11-08T00:48:26,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44401-0x100f50c52a90001, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-08T00:48:26,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36935-0x100f50c52a90002, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-08T00:48:26,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46149-0x100f50c52a90000, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:48:26,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44401-0x100f50c52a90001, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:48:26,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36935-0x100f50c52a90002, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:48:26,861 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46149-0x100f50c52a90000, quorum=127.0.0.1:63994, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-08T00:48:26,862 INFO [master/c302dddf724e:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c302dddf724e,46149,1731026905945 from backup master directory 2024-11-08T00:48:26,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46149-0x100f50c52a90000, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c302dddf724e,46149,1731026905945 2024-11-08T00:48:26,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44401-0x100f50c52a90001, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:48:26,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46149-0x100f50c52a90000, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:48:26,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36935-0x100f50c52a90002, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:48:26,866 WARN [master/c302dddf724e:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-08T00:48:26,866 INFO [master/c302dddf724e:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c302dddf724e,46149,1731026905945 2024-11-08T00:48:26,868 INFO [master/c302dddf724e:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-08T00:48:26,870 INFO [master/c302dddf724e:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-08T00:48:26,927 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/hbase.id] with ID: 0de92a3d-246f-491f-8d40-20aabd3b8c65 2024-11-08T00:48:26,927 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/.tmp/hbase.id 2024-11-08T00:48:26,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33807 is added to blk_1073741826_1002 (size=42) 2024-11-08T00:48:26,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44001 is added to blk_1073741826_1002 (size=42) 2024-11-08T00:48:26,940 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/.tmp/hbase.id]:[hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/hbase.id] 2024-11-08T00:48:26,982 INFO [master/c302dddf724e:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:48:26,987 INFO [master/c302dddf724e:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-08T00:48:27,006 INFO [master/c302dddf724e:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 18ms. 
2024-11-08T00:48:27,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36935-0x100f50c52a90002, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:48:27,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44401-0x100f50c52a90001, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:48:27,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46149-0x100f50c52a90000, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:48:27,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44001 is added to blk_1073741827_1003 (size=196) 2024-11-08T00:48:27,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33807 is added to blk_1073741827_1003 (size=196) 2024-11-08T00:48:27,044 INFO [master/c302dddf724e:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-08T00:48:27,047 INFO [master/c302dddf724e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-08T00:48:27,064 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at java.lang.Class.forName0(Native Method) ~[?:?]
    at java.lang.Class.forName(Class.java:375) ~[?:?]
    at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:262) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:231) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-08T00:48:27,069 INFO [master/c302dddf724e:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-08T00:48:27,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44001 is added to blk_1073741828_1004 (size=1189) 2024-11-08T00:48:27,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33807 is added to blk_1073741828_1004 (size=1189) 2024-11-08T00:48:27,117 INFO [master/c302dddf724e:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/MasterData/data/master/store 2024-11-08T00:48:27,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44001 is added to blk_1073741829_1005 (size=34) 2024-11-08T00:48:27,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33807 is added to blk_1073741829_1005 (size=34) 2024-11-08T00:48:27,142 INFO [master/c302dddf724e:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-08T00:48:27,145 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:48:27,147 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-08T00:48:27,147 INFO [master/c302dddf724e:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-08T00:48:27,147 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:48:27,149 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-08T00:48:27,150 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:48:27,150 INFO [master/c302dddf724e:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:48:27,151 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731026907147Disabling compacts and flushes for region at 1731026907147Disabling writes for close at 1731026907149 (+2 ms)Writing region close event to WAL at 1731026907150 (+1 ms)Closed at 1731026907150 2024-11-08T00:48:27,154 WARN [master/c302dddf724e:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/MasterData/data/master/store/.initializing 2024-11-08T00:48:27,154 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/MasterData/WALs/c302dddf724e,46149,1731026905945 2024-11-08T00:48:27,163 INFO [master/c302dddf724e:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-08T00:48:27,180 INFO [master/c302dddf724e:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c302dddf724e%2C46149%2C1731026905945, suffix=, logDir=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/MasterData/WALs/c302dddf724e,46149,1731026905945, archiveDir=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/MasterData/oldWALs, maxLogs=10 2024-11-08T00:48:27,205 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/MasterData/WALs/c302dddf724e,46149,1731026905945/c302dddf724e%2C46149%2C1731026905945.1731026907185, exclude list is [], retry=0 2024-11-08T00:48:27,226 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33807,DS-1215f7db-7ec6-4ee8-964e-f978599b7c57,DISK] 2024-11-08T00:48:27,226 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44001,DS-b4ee67b5-eafb-46f1-9643-b58926c54623,DISK] 2024-11-08T00:48:27,230 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
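[Editor's note] The StoreHotnessProtector entries above (parallelPutToStoreThreadLimit=0, hotProtect disabled) name the configuration key that governs the protector. Purely as an illustrative sketch, and not something taken from this test run, that key could be set on a standard HBase client Configuration as shown below; the class name and the value 10 are invented for the example, and per the log message any value greater than 0 enables the protector.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Hypothetical sketch: sets the key named in the StoreHotnessProtector log entry above.
    public class StoreHotnessProtectorSketch {
      public static void main(String[] args) {
        // Loads hbase-default.xml / hbase-site.xml from the classpath.
        Configuration conf = HBaseConfiguration.create();
        // Per the log message, a value > 0 enables the protector; 10 is an arbitrary example.
        conf.setInt("hbase.region.store.parallel.put.limit", 10);
        System.out.println("parallel put limit = "
            + conf.getInt("hbase.region.store.parallel.put.limit", 0));
      }
    }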
2024-11-08T00:48:27,272 INFO [master/c302dddf724e:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/MasterData/WALs/c302dddf724e,46149,1731026905945/c302dddf724e%2C46149%2C1731026905945.1731026907185 2024-11-08T00:48:27,273 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42267:42267),(127.0.0.1/127.0.0.1:34211:34211)] 2024-11-08T00:48:27,274 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-08T00:48:27,275 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:48:27,278 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:48:27,280 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:48:27,322 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:48:27,352 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-08T00:48:27,356 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:48:27,359 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:48:27,360 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:48:27,364 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): 
size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-08T00:48:27,364 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:48:27,366 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:48:27,367 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:48:27,372 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-08T00:48:27,372 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:48:27,373 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:48:27,374 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:48:27,381 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak 
ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-08T00:48:27,381 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:48:27,382 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:48:27,383 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:48:27,387 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:48:27,388 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:48:27,393 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:48:27,394 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:48:27,397 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-08T00:48:27,401 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:48:27,406 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T00:48:27,409 INFO [master/c302dddf724e:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69193538, jitterRate=0.03106406331062317}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-08T00:48:27,417 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731026907296Initializing all the Stores at 1731026907299 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026907299Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026907300 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026907300Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026907300Cleaning up temporary data from old regions at 1731026907394 (+94 ms)Region opened successfully at 1731026907417 (+23 ms) 2024-11-08T00:48:27,418 INFO [master/c302dddf724e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-08T00:48:27,462 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22544bd5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c302dddf724e/172.17.0.2:0 2024-11-08T00:48:27,502 INFO [master/c302dddf724e:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
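[Editor's note] The FlushLargeStoresPolicy entry above reports that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the master:store table descriptor, so the policy falls back to the memstore flush size divided by the number of families. As a hedged illustration only (the table name 'demo', family 'info', and the 16 MB value are invented), the key named in the log can be attached to a table descriptor through the public client API:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    // Hypothetical sketch: stores the per-column-family flush lower bound as a table attribute.
    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            // Key taken verbatim from the log entry; 16 MB is an arbitrary example value.
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16L * 1024 * 1024))
            .build();
        System.out.println(td);
      }
    }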
2024-11-08T00:48:27,516 INFO [master/c302dddf724e:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-08T00:48:27,516 INFO [master/c302dddf724e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-08T00:48:27,520 INFO [master/c302dddf724e:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-08T00:48:27,521 INFO [master/c302dddf724e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-08T00:48:27,527 INFO [master/c302dddf724e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-11-08T00:48:27,527 INFO [master/c302dddf724e:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-08T00:48:27,557 INFO [master/c302dddf724e:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-08T00:48:27,566 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46149-0x100f50c52a90000, quorum=127.0.0.1:63994, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-08T00:48:27,569 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-08T00:48:27,571 INFO [master/c302dddf724e:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-08T00:48:27,573 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46149-0x100f50c52a90000, quorum=127.0.0.1:63994, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-08T00:48:27,574 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-08T00:48:27,577 INFO [master/c302dddf724e:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-08T00:48:27,580 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46149-0x100f50c52a90000, quorum=127.0.0.1:63994, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-08T00:48:27,581 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-08T00:48:27,582 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46149-0x100f50c52a90000, quorum=127.0.0.1:63994, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-08T00:48:27,584 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-08T00:48:27,601 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46149-0x100f50c52a90000, quorum=127.0.0.1:63994, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-08T00:48:27,604 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-08T00:48:27,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44401-0x100f50c52a90001, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T00:48:27,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36935-0x100f50c52a90002, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T00:48:27,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46149-0x100f50c52a90000, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T00:48:27,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44401-0x100f50c52a90001, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:48:27,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36935-0x100f50c52a90002, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:48:27,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46149-0x100f50c52a90000, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:48:27,611 INFO [master/c302dddf724e:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c302dddf724e,46149,1731026905945, sessionid=0x100f50c52a90000, setting cluster-up flag (Was=false) 2024-11-08T00:48:27,623 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44401-0x100f50c52a90001, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:48:27,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36935-0x100f50c52a90002, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:48:27,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46149-0x100f50c52a90000, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:48:27,630 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-08T00:48:27,632 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c302dddf724e,46149,1731026905945 2024-11-08T00:48:27,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46149-0x100f50c52a90000, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:48:27,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:44401-0x100f50c52a90001, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:48:27,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36935-0x100f50c52a90002, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:48:27,644 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-08T00:48:27,646 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c302dddf724e,46149,1731026905945 2024-11-08T00:48:27,652 INFO [master/c302dddf724e:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-08T00:48:27,679 INFO [master/c302dddf724e:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.quotas.MasterQuotasObserver loaded, priority=536870911. 2024-11-08T00:48:27,713 INFO [RS:1;c302dddf724e:36935 {}] regionserver.HRegionServer(746): ClusterId : 0de92a3d-246f-491f-8d40-20aabd3b8c65 2024-11-08T00:48:27,713 INFO [RS:0;c302dddf724e:44401 {}] regionserver.HRegionServer(746): ClusterId : 0de92a3d-246f-491f-8d40-20aabd3b8c65 2024-11-08T00:48:27,716 DEBUG [RS:0;c302dddf724e:44401 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-08T00:48:27,716 DEBUG [RS:1;c302dddf724e:36935 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-08T00:48:27,727 DEBUG [RS:1;c302dddf724e:36935 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-08T00:48:27,727 DEBUG [RS:0;c302dddf724e:44401 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-08T00:48:27,727 DEBUG [RS:1;c302dddf724e:36935 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-08T00:48:27,727 DEBUG [RS:0;c302dddf724e:44401 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-08T00:48:27,730 DEBUG [RS:1;c302dddf724e:36935 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-08T00:48:27,730 DEBUG [RS:0;c302dddf724e:44401 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-08T00:48:27,730 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-08T00:48:27,731 DEBUG [RS:1;c302dddf724e:36935 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7427916a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c302dddf724e/172.17.0.2:0 2024-11-08T00:48:27,731 DEBUG [RS:0;c302dddf724e:44401 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e33d44e, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c302dddf724e/172.17.0.2:0 2024-11-08T00:48:27,740 INFO [master/c302dddf724e:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-08T00:48:27,746 INFO [master/c302dddf724e:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-08T00:48:27,748 DEBUG [RS:1;c302dddf724e:36935 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;c302dddf724e:36935 2024-11-08T00:48:27,749 DEBUG [RS:0;c302dddf724e:44401 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c302dddf724e:44401 2024-11-08T00:48:27,752 INFO [RS:1;c302dddf724e:36935 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-08T00:48:27,752 INFO [RS:0;c302dddf724e:44401 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-08T00:48:27,752 INFO [RS:1;c302dddf724e:36935 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-08T00:48:27,752 INFO [RS:0;c302dddf724e:44401 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-08T00:48:27,753 DEBUG [RS:1;c302dddf724e:36935 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-08T00:48:27,753 DEBUG [RS:0;c302dddf724e:44401 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-08T00:48:27,756 INFO [RS:1;c302dddf724e:36935 {}] regionserver.HRegionServer(2659): reportForDuty to master=c302dddf724e,46149,1731026905945 with port=36935, startcode=1731026906786 2024-11-08T00:48:27,756 INFO [RS:0;c302dddf724e:44401 {}] regionserver.HRegionServer(2659): reportForDuty to master=c302dddf724e,46149,1731026905945 with port=44401, startcode=1731026906683 2024-11-08T00:48:27,753 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c302dddf724e,46149,1731026905945 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-08T00:48:27,761 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c302dddf724e:0, corePoolSize=5, maxPoolSize=5 2024-11-08T00:48:27,762 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c302dddf724e:0, corePoolSize=5, maxPoolSize=5 2024-11-08T00:48:27,762 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c302dddf724e:0, corePoolSize=5, maxPoolSize=5 2024-11-08T00:48:27,762 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c302dddf724e:0, corePoolSize=5, maxPoolSize=5 2024-11-08T00:48:27,762 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c302dddf724e:0, corePoolSize=10, maxPoolSize=10 2024-11-08T00:48:27,762 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c302dddf724e:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:48:27,762 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c302dddf724e:0, corePoolSize=2, maxPoolSize=2 2024-11-08T00:48:27,763 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c302dddf724e:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:48:27,769 DEBUG [RS:0;c302dddf724e:44401 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-08T00:48:27,769 DEBUG [RS:1;c302dddf724e:36935 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-08T00:48:27,770 INFO [master/c302dddf724e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731026937769 2024-11-08T00:48:27,771 INFO [master/c302dddf724e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-08T00:48:27,773 INFO [master/c302dddf724e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-08T00:48:27,773 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T00:48:27,773 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-08T00:48:27,777 INFO [master/c302dddf724e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-08T00:48:27,777 INFO [master/c302dddf724e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-08T00:48:27,777 INFO [master/c302dddf724e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-08T00:48:27,778 INFO [master/c302dddf724e:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-08T00:48:27,779 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:48:27,779 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-08T00:48:27,779 INFO [master/c302dddf724e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-08T00:48:27,782 INFO [master/c302dddf724e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-08T00:48:27,783 INFO [master/c302dddf724e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-08T00:48:27,784 INFO [master/c302dddf724e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-08T00:48:27,788 INFO [master/c302dddf724e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-08T00:48:27,789 INFO [master/c302dddf724e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-08T00:48:27,792 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c302dddf724e:0:becomeActiveMaster-HFileCleaner.large.0-1731026907791,5,FailOnTimeoutGroup] 2024-11-08T00:48:27,794 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c302dddf724e:0:becomeActiveMaster-HFileCleaner.small.0-1731026907793,5,FailOnTimeoutGroup] 2024-11-08T00:48:27,794 INFO [master/c302dddf724e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:27,795 INFO [master/c302dddf724e:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-08T00:48:27,796 INFO [master/c302dddf724e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:27,797 INFO [master/c302dddf724e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-08T00:48:27,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44001 is added to blk_1073741831_1007 (size=1321) 2024-11-08T00:48:27,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33807 is added to blk_1073741831_1007 (size=1321) 2024-11-08T00:48:27,800 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-08T00:48:27,801 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4 2024-11-08T00:48:27,810 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32999, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-08T00:48:27,810 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50823, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-08T00:48:27,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44001 is added to blk_1073741832_1008 (size=32) 2024-11-08T00:48:27,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33807 is added to blk_1073741832_1008 (size=32) 2024-11-08T00:48:27,816 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46149 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c302dddf724e,36935,1731026906786 2024-11-08T00:48:27,819 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46149 {}] master.ServerManager(517): Registering 
regionserver=c302dddf724e,36935,1731026906786 2024-11-08T00:48:27,829 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46149 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c302dddf724e,44401,1731026906683 2024-11-08T00:48:27,829 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46149 {}] master.ServerManager(517): Registering regionserver=c302dddf724e,44401,1731026906683 2024-11-08T00:48:27,832 DEBUG [RS:0;c302dddf724e:44401 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4 2024-11-08T00:48:27,832 DEBUG [RS:1;c302dddf724e:36935 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4 2024-11-08T00:48:27,833 DEBUG [RS:1;c302dddf724e:36935 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36609 2024-11-08T00:48:27,833 DEBUG [RS:0;c302dddf724e:44401 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36609 2024-11-08T00:48:27,833 DEBUG [RS:1;c302dddf724e:36935 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-08T00:48:27,833 DEBUG [RS:0;c302dddf724e:44401 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-08T00:48:27,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46149-0x100f50c52a90000, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T00:48:27,842 DEBUG [RS:1;c302dddf724e:36935 {}] zookeeper.ZKUtil(111): regionserver:36935-0x100f50c52a90002, quorum=127.0.0.1:63994, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c302dddf724e,36935,1731026906786 2024-11-08T00:48:27,842 DEBUG [RS:0;c302dddf724e:44401 {}] zookeeper.ZKUtil(111): regionserver:44401-0x100f50c52a90001, quorum=127.0.0.1:63994, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c302dddf724e,44401,1731026906683 2024-11-08T00:48:27,842 WARN [RS:0;c302dddf724e:44401 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-08T00:48:27,842 WARN [RS:1;c302dddf724e:36935 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
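[Editor's note] The ZNodeClearer warnings above indicate that HBASE_ZNODE_FILE is not exported in this test environment, so the start scripts cannot clear a crashed region server's znode (hence the "Longer MTTR!" note). A minimal, hypothetical Java check of that environment variable, included only to make the warning concrete, might look like:

    // Hypothetical sketch: mirrors the environment check implied by the ZNodeClearer warning above.
    public class ZNodeFileEnvCheck {
      public static void main(String[] args) {
        String znodeFile = System.getenv("HBASE_ZNODE_FILE");
        if (znodeFile == null || znodeFile.isEmpty()) {
          System.out.println("HBASE_ZNODE_FILE is not set; znodes will not be cleared on crash by start scripts");
        } else {
          System.out.println("HBASE_ZNODE_FILE=" + znodeFile);
        }
      }
    }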
2024-11-08T00:48:27,842 INFO [RS:1;c302dddf724e:36935 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-08T00:48:27,842 INFO [RS:0;c302dddf724e:44401 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-08T00:48:27,843 DEBUG [RS:1;c302dddf724e:36935 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/WALs/c302dddf724e,36935,1731026906786 2024-11-08T00:48:27,843 DEBUG [RS:0;c302dddf724e:44401 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/WALs/c302dddf724e,44401,1731026906683 2024-11-08T00:48:27,844 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c302dddf724e,36935,1731026906786] 2024-11-08T00:48:27,844 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c302dddf724e,44401,1731026906683] 2024-11-08T00:48:27,869 INFO [RS:0;c302dddf724e:44401 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-08T00:48:27,869 INFO [RS:1;c302dddf724e:36935 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-08T00:48:27,883 INFO [RS:0;c302dddf724e:44401 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-08T00:48:27,883 INFO [RS:1;c302dddf724e:36935 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-08T00:48:27,888 INFO [RS:1;c302dddf724e:36935 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-08T00:48:27,888 INFO [RS:0;c302dddf724e:44401 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-08T00:48:27,889 INFO [RS:0;c302dddf724e:44401 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:27,889 INFO [RS:1;c302dddf724e:36935 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:27,890 INFO [RS:0;c302dddf724e:44401 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-08T00:48:27,890 INFO [RS:1;c302dddf724e:36935 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-08T00:48:27,896 INFO [RS:0;c302dddf724e:44401 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-08T00:48:27,896 INFO [RS:1;c302dddf724e:36935 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-08T00:48:27,897 INFO [RS:1;c302dddf724e:36935 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-08T00:48:27,897 INFO [RS:0;c302dddf724e:44401 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:27,898 DEBUG [RS:1;c302dddf724e:36935 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c302dddf724e:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:48:27,898 DEBUG [RS:0;c302dddf724e:44401 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c302dddf724e:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:48:27,898 DEBUG [RS:1;c302dddf724e:36935 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c302dddf724e:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:48:27,898 DEBUG [RS:0;c302dddf724e:44401 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c302dddf724e:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:48:27,898 DEBUG [RS:0;c302dddf724e:44401 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c302dddf724e:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:48:27,898 DEBUG [RS:1;c302dddf724e:36935 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c302dddf724e:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:48:27,898 DEBUG [RS:0;c302dddf724e:44401 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c302dddf724e:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:48:27,898 DEBUG [RS:1;c302dddf724e:36935 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c302dddf724e:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:48:27,899 DEBUG [RS:1;c302dddf724e:36935 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c302dddf724e:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:48:27,899 DEBUG [RS:0;c302dddf724e:44401 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c302dddf724e:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:48:27,899 DEBUG [RS:0;c302dddf724e:44401 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c302dddf724e:0, corePoolSize=2, maxPoolSize=2 2024-11-08T00:48:27,899 DEBUG [RS:1;c302dddf724e:36935 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c302dddf724e:0, corePoolSize=2, maxPoolSize=2 2024-11-08T00:48:27,899 DEBUG [RS:1;c302dddf724e:36935 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c302dddf724e:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:48:27,899 DEBUG [RS:0;c302dddf724e:44401 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c302dddf724e:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:48:27,899 DEBUG [RS:1;c302dddf724e:36935 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c302dddf724e:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:48:27,899 DEBUG [RS:0;c302dddf724e:44401 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c302dddf724e:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:48:27,899 DEBUG [RS:1;c302dddf724e:36935 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c302dddf724e:0, corePoolSize=1, maxPoolSize=1 
2024-11-08T00:48:27,899 DEBUG [RS:0;c302dddf724e:44401 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c302dddf724e:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:48:27,900 DEBUG [RS:0;c302dddf724e:44401 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c302dddf724e:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:48:27,900 DEBUG [RS:1;c302dddf724e:36935 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c302dddf724e:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:48:27,900 DEBUG [RS:0;c302dddf724e:44401 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c302dddf724e:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:48:27,900 DEBUG [RS:1;c302dddf724e:36935 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c302dddf724e:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:48:27,900 DEBUG [RS:0;c302dddf724e:44401 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c302dddf724e:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:48:27,900 DEBUG [RS:1;c302dddf724e:36935 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c302dddf724e:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:48:27,900 DEBUG [RS:0;c302dddf724e:44401 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c302dddf724e:0, corePoolSize=3, maxPoolSize=3 2024-11-08T00:48:27,900 DEBUG [RS:1;c302dddf724e:36935 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c302dddf724e:0, corePoolSize=3, maxPoolSize=3 2024-11-08T00:48:27,900 DEBUG [RS:0;c302dddf724e:44401 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c302dddf724e:0, corePoolSize=3, maxPoolSize=3 2024-11-08T00:48:27,900 DEBUG [RS:1;c302dddf724e:36935 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c302dddf724e:0, corePoolSize=3, maxPoolSize=3 2024-11-08T00:48:27,902 INFO [RS:1;c302dddf724e:36935 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:27,902 INFO [RS:0;c302dddf724e:44401 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:27,902 INFO [RS:1;c302dddf724e:36935 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:27,902 INFO [RS:1;c302dddf724e:36935 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:27,902 INFO [RS:0;c302dddf724e:44401 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:27,902 INFO [RS:1;c302dddf724e:36935 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:27,902 INFO [RS:1;c302dddf724e:36935 {}] hbase.ChoreService(168): Chore ScheduledChore name=FileSystemUtilizationChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-08T00:48:27,902 INFO [RS:0;c302dddf724e:44401 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:27,902 INFO [RS:1;c302dddf724e:36935 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:27,902 INFO [RS:0;c302dddf724e:44401 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:27,902 INFO [RS:1;c302dddf724e:36935 {}] hbase.ChoreService(168): Chore ScheduledChore name=c302dddf724e,36935,1731026906786-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T00:48:27,902 INFO [RS:0;c302dddf724e:44401 {}] hbase.ChoreService(168): Chore ScheduledChore name=FileSystemUtilizationChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:27,902 INFO [RS:0;c302dddf724e:44401 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:27,903 INFO [RS:0;c302dddf724e:44401 {}] hbase.ChoreService(168): Chore ScheduledChore name=c302dddf724e,44401,1731026906683-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T00:48:27,925 INFO [RS:1;c302dddf724e:36935 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-08T00:48:27,925 INFO [RS:0;c302dddf724e:44401 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-08T00:48:27,927 INFO [RS:1;c302dddf724e:36935 {}] hbase.ChoreService(168): Chore ScheduledChore name=c302dddf724e,36935,1731026906786-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:27,927 INFO [RS:0;c302dddf724e:44401 {}] hbase.ChoreService(168): Chore ScheduledChore name=c302dddf724e,44401,1731026906683-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:27,927 INFO [RS:1;c302dddf724e:36935 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:27,927 INFO [RS:0;c302dddf724e:44401 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:27,928 INFO [RS:1;c302dddf724e:36935 {}] regionserver.Replication(171): c302dddf724e,36935,1731026906786 started 2024-11-08T00:48:27,928 INFO [RS:0;c302dddf724e:44401 {}] regionserver.Replication(171): c302dddf724e,44401,1731026906683 started 2024-11-08T00:48:27,948 INFO [RS:1;c302dddf724e:36935 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:27,948 INFO [RS:0;c302dddf724e:44401 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
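Annotation: every "Chore ScheduledChore name=..., period=..., unit=... is enabled." entry above is a periodic task being handed to the region server's ChoreService (CompactionChecker and MemstoreFlusherChore every second in this test, the MobFileCleanerChore once a day, and so on). The sketch below shows roughly how such a chore is defined and scheduled; ScheduledChore and ChoreService are internal (InterfaceAudience.Private) HBase classes, so the constructors used here are assumptions against 2.x and may not match other releases.

// Rough sketch of a periodic chore; internal HBase API, signatures assumed.
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    Stoppable stopper = new Stoppable() {               // lets the chore be cancelled
      private volatile boolean stopped = false;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("demo");    // thread-name prefix
    ScheduledChore heartbeat = new ScheduledChore("Heartbeat", stopper, 1000) {
      @Override protected void chore() {                // runs roughly every 1000 ms
        System.out.println("chore tick");
      }
    };
    service.scheduleChore(heartbeat);  // ChoreService then logs "... is enabled."
    Thread.sleep(3000);
    service.shutdown();
  }
}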
2024-11-08T00:48:27,948 INFO [RS:1;c302dddf724e:36935 {}] regionserver.HRegionServer(1482): Serving as c302dddf724e,36935,1731026906786, RpcServer on c302dddf724e/172.17.0.2:36935, sessionid=0x100f50c52a90002 2024-11-08T00:48:27,948 INFO [RS:0;c302dddf724e:44401 {}] regionserver.HRegionServer(1482): Serving as c302dddf724e,44401,1731026906683, RpcServer on c302dddf724e/172.17.0.2:44401, sessionid=0x100f50c52a90001 2024-11-08T00:48:27,949 DEBUG [RS:0;c302dddf724e:44401 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-08T00:48:27,949 DEBUG [RS:1;c302dddf724e:36935 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-08T00:48:27,949 DEBUG [RS:0;c302dddf724e:44401 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c302dddf724e,44401,1731026906683 2024-11-08T00:48:27,949 DEBUG [RS:1;c302dddf724e:36935 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c302dddf724e,36935,1731026906786 2024-11-08T00:48:27,950 DEBUG [RS:0;c302dddf724e:44401 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c302dddf724e,44401,1731026906683' 2024-11-08T00:48:27,950 DEBUG [RS:1;c302dddf724e:36935 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c302dddf724e,36935,1731026906786' 2024-11-08T00:48:27,950 DEBUG [RS:0;c302dddf724e:44401 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-08T00:48:27,950 DEBUG [RS:1;c302dddf724e:36935 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-08T00:48:27,951 DEBUG [RS:0;c302dddf724e:44401 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-08T00:48:27,951 DEBUG [RS:1;c302dddf724e:36935 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-08T00:48:27,951 DEBUG [RS:0;c302dddf724e:44401 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-08T00:48:27,951 DEBUG [RS:1;c302dddf724e:36935 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-08T00:48:27,952 DEBUG [RS:1;c302dddf724e:36935 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-08T00:48:27,952 DEBUG [RS:0;c302dddf724e:44401 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-08T00:48:27,952 DEBUG [RS:0;c302dddf724e:44401 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c302dddf724e,44401,1731026906683 2024-11-08T00:48:27,952 DEBUG [RS:1;c302dddf724e:36935 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c302dddf724e,36935,1731026906786 2024-11-08T00:48:27,952 DEBUG [RS:0;c302dddf724e:44401 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c302dddf724e,44401,1731026906683' 2024-11-08T00:48:27,952 DEBUG [RS:0;c302dddf724e:44401 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-08T00:48:27,952 DEBUG [RS:1;c302dddf724e:36935 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c302dddf724e,36935,1731026906786' 2024-11-08T00:48:27,952 DEBUG [RS:1;c302dddf724e:36935 {}] procedure.ZKProcedureMemberRpcs(134): Checking for 
aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-08T00:48:27,953 DEBUG [RS:0;c302dddf724e:44401 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-08T00:48:27,953 DEBUG [RS:1;c302dddf724e:36935 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-08T00:48:27,954 DEBUG [RS:1;c302dddf724e:36935 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-08T00:48:27,954 INFO [RS:1;c302dddf724e:36935 {}] quotas.RegionServerRpcQuotaManager(68): Initializing RPC quota support 2024-11-08T00:48:27,954 DEBUG [RS:0;c302dddf724e:44401 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-08T00:48:27,954 INFO [RS:0;c302dddf724e:44401 {}] quotas.RegionServerRpcQuotaManager(68): Initializing RPC quota support 2024-11-08T00:48:27,956 INFO [RS:0;c302dddf724e:44401 {}] hbase.ChoreService(168): Chore ScheduledChore name=QuotaRefresherChore, period=1800000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:27,956 INFO [RS:1;c302dddf724e:36935 {}] hbase.ChoreService(168): Chore ScheduledChore name=QuotaRefresherChore, period=1800000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:27,956 DEBUG [RS:0;c302dddf724e:44401 {}] zookeeper.ZKUtil(347): regionserver:44401-0x100f50c52a90001, quorum=127.0.0.1:63994, baseZNode=/hbase Unable to get data of znode /hbase/rpc-throttle because node does not exist (not an error) 2024-11-08T00:48:27,957 INFO [RS:0;c302dddf724e:44401 {}] quotas.RegionServerRpcQuotaManager(74): Start rpc quota manager and rpc throttle enabled is true 2024-11-08T00:48:27,957 DEBUG [RS:1;c302dddf724e:36935 {}] zookeeper.ZKUtil(347): regionserver:36935-0x100f50c52a90002, quorum=127.0.0.1:63994, baseZNode=/hbase Unable to get data of znode /hbase/rpc-throttle because node does not exist (not an error) 2024-11-08T00:48:27,957 INFO [RS:1;c302dddf724e:36935 {}] quotas.RegionServerRpcQuotaManager(74): Start rpc quota manager and rpc throttle enabled is true 2024-11-08T00:48:27,957 INFO [RS:1;c302dddf724e:36935 {}] hbase.ChoreService(168): Chore ScheduledChore name=SpaceQuotaRefresherChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:27,957 INFO [RS:0;c302dddf724e:44401 {}] hbase.ChoreService(168): Chore ScheduledChore name=SpaceQuotaRefresherChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:27,957 INFO [RS:0;c302dddf724e:44401 {}] hbase.ChoreService(168): Chore ScheduledChore name=RegionSizeReportingChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:27,957 INFO [RS:1;c302dddf724e:36935 {}] hbase.ChoreService(168): Chore ScheduledChore name=RegionSizeReportingChore, period=60000, unit=MILLISECONDS is enabled. 
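Annotation: the last few entries above come from quota support being active on this test cluster: each region server initializes its RegionServerRpcQuotaManager, probes the /hbase/rpc-throttle znode (absent, so the default applies and "rpc throttle enabled is true"), and schedules the QuotaRefresherChore and SpaceQuotaRefresherChore. From the client side, quotas and the RPC throttle are managed through the Admin API; a hedged sketch follows, assuming an HBase 2.2+ Admin and that hbase.quota.enabled is set on the cluster.

// Client-side sketch; assumes quota support is enabled and an HBase 2.2+ Admin
// API (switchRpcThrottle). Verify method availability for your client version.
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
import org.apache.hadoop.hbase.quotas.ThrottleType;

public class QuotaSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Throttle the "jenkins" user to 100 requests/sec; stored in hbase:quota.
      admin.setQuota(QuotaSettingsFactory.throttleUser(
          "jenkins", ThrottleType.REQUEST_NUMBER, 100, TimeUnit.SECONDS));
      // Corresponds to the "rpc throttle enabled is true" state logged above.
      boolean previouslyEnabled = admin.switchRpcThrottle(true);
      System.out.println("rpc throttle was enabled before: " + previouslyEnabled);
    }
  }
}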
2024-11-08T00:48:28,064 INFO [RS:0;c302dddf724e:44401 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-08T00:48:28,064 INFO [RS:1;c302dddf724e:36935 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-08T00:48:28,068 INFO [RS:1;c302dddf724e:36935 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c302dddf724e%2C36935%2C1731026906786, suffix=, logDir=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/WALs/c302dddf724e,36935,1731026906786, archiveDir=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/oldWALs, maxLogs=32 2024-11-08T00:48:28,068 INFO [RS:0;c302dddf724e:44401 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c302dddf724e%2C44401%2C1731026906683, suffix=, logDir=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/WALs/c302dddf724e,44401,1731026906683, archiveDir=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/oldWALs, maxLogs=32 2024-11-08T00:48:28,087 DEBUG [RS:0;c302dddf724e:44401 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/WALs/c302dddf724e,44401,1731026906683/c302dddf724e%2C44401%2C1731026906683.1731026908072, exclude list is [], retry=0 2024-11-08T00:48:28,091 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33807,DS-1215f7db-7ec6-4ee8-964e-f978599b7c57,DISK] 2024-11-08T00:48:28,091 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44001,DS-b4ee67b5-eafb-46f1-9643-b58926c54623,DISK] 2024-11-08T00:48:28,093 DEBUG [RS:1;c302dddf724e:36935 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/WALs/c302dddf724e,36935,1731026906786/c302dddf724e%2C36935%2C1731026906786.1731026908072, exclude list is [], retry=0 2024-11-08T00:48:28,095 INFO [RS:0;c302dddf724e:44401 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/WALs/c302dddf724e,44401,1731026906683/c302dddf724e%2C44401%2C1731026906683.1731026908072 2024-11-08T00:48:28,096 DEBUG [RS:0;c302dddf724e:44401 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42267:42267),(127.0.0.1/127.0.0.1:34211:34211)] 2024-11-08T00:48:28,097 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33807,DS-1215f7db-7ec6-4ee8-964e-f978599b7c57,DISK] 2024-11-08T00:48:28,097 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44001,DS-b4ee67b5-eafb-46f1-9643-b58926c54623,DISK] 2024-11-08T00:48:28,101 INFO 
[RS:1;c302dddf724e:36935 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/WALs/c302dddf724e,36935,1731026906786/c302dddf724e%2C36935%2C1731026906786.1731026908072 2024-11-08T00:48:28,103 DEBUG [RS:1;c302dddf724e:36935 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34211:34211),(127.0.0.1/127.0.0.1:42267:42267)] 2024-11-08T00:48:28,218 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:48:28,220 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-08T00:48:28,223 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-08T00:48:28,223 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:48:28,224 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:48:28,224 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-08T00:48:28,226 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-08T00:48:28,226 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:48:28,227 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:48:28,228 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-08T00:48:28,230 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-08T00:48:28,230 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:48:28,231 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:48:28,231 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-08T00:48:28,233 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-08T00:48:28,233 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:48:28,234 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:48:28,234 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-08T00:48:28,236 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/meta/1588230740 2024-11-08T00:48:28,237 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/meta/1588230740 2024-11-08T00:48:28,240 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-08T00:48:28,240 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-08T00:48:28,241 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-08T00:48:28,244 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-08T00:48:28,248 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T00:48:28,249 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64512409, jitterRate=-0.038690194487571716}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-08T00:48:28,251 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731026908218Initializing all the Stores at 1731026908220 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026908220Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026908220Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026908220Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026908220Cleaning up temporary data from old regions at 1731026908240 (+20 ms)Region opened successfully at 1731026908251 (+11 ms) 2024-11-08T00:48:28,251 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-08T00:48:28,251 INFO [PEWorker-1 {}] regionserver.HRegion(1755): 
Closing region hbase:meta,,1.1588230740 2024-11-08T00:48:28,251 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-08T00:48:28,251 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-08T00:48:28,251 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-08T00:48:28,253 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-08T00:48:28,253 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731026908251Disabling compacts and flushes for region at 1731026908251Disabling writes for close at 1731026908251Writing region close event to WAL at 1731026908252 (+1 ms)Closed at 1731026908253 (+1 ms) 2024-11-08T00:48:28,257 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T00:48:28,257 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-08T00:48:28,264 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-08T00:48:28,273 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-08T00:48:28,276 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-08T00:48:28,429 DEBUG [c302dddf724e:46149 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=2, allServersCount=2 2024-11-08T00:48:28,437 DEBUG [c302dddf724e:46149 {}] balancer.BalancerClusterState(204): Hosts are {c302dddf724e=0} racks are {/default-rack=0} 2024-11-08T00:48:28,444 DEBUG [c302dddf724e:46149 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-08T00:48:28,444 DEBUG [c302dddf724e:46149 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-08T00:48:28,444 DEBUG [c302dddf724e:46149 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-08T00:48:28,444 DEBUG [c302dddf724e:46149 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-08T00:48:28,444 INFO [c302dddf724e:46149 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-08T00:48:28,444 INFO [c302dddf724e:46149 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-08T00:48:28,445 DEBUG [c302dddf724e:46149 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-08T00:48:28,454 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c302dddf724e,44401,1731026906683 2024-11-08T00:48:28,461 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c302dddf724e,44401,1731026906683, state=OPENING 2024-11-08T00:48:28,467 DEBUG 
[PEWorker-4 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-08T00:48:28,469 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44401-0x100f50c52a90001, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:48:28,469 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46149-0x100f50c52a90000, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:48:28,469 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36935-0x100f50c52a90002, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:48:28,470 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:48:28,470 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:48:28,470 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:48:28,471 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-08T00:48:28,473 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c302dddf724e,44401,1731026906683}] 2024-11-08T00:48:28,648 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-08T00:48:28,651 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37283, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-08T00:48:28,662 INFO [RS_OPEN_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-08T00:48:28,663 INFO [RS_OPEN_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-08T00:48:28,663 INFO [RS_OPEN_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-08T00:48:28,666 INFO [RS_OPEN_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c302dddf724e%2C44401%2C1731026906683.meta, suffix=.meta, logDir=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/WALs/c302dddf724e,44401,1731026906683, archiveDir=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/oldWALs, maxLogs=32 2024-11-08T00:48:28,683 DEBUG [RS_OPEN_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for 
/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/WALs/c302dddf724e,44401,1731026906683/c302dddf724e%2C44401%2C1731026906683.meta.1731026908668.meta, exclude list is [], retry=0 2024-11-08T00:48:28,686 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33807,DS-1215f7db-7ec6-4ee8-964e-f978599b7c57,DISK] 2024-11-08T00:48:28,687 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44001,DS-b4ee67b5-eafb-46f1-9643-b58926c54623,DISK] 2024-11-08T00:48:28,690 INFO [RS_OPEN_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/WALs/c302dddf724e,44401,1731026906683/c302dddf724e%2C44401%2C1731026906683.meta.1731026908668.meta 2024-11-08T00:48:28,690 DEBUG [RS_OPEN_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34211:34211),(127.0.0.1/127.0.0.1:42267:42267)] 2024-11-08T00:48:28,691 DEBUG [RS_OPEN_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-08T00:48:28,692 DEBUG [RS_OPEN_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-08T00:48:28,695 DEBUG [RS_OPEN_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-08T00:48:28,697 INFO [RS_OPEN_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
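Annotation: the WAL setup above ("WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32", the AsyncFSWALProvider chosen for the .meta WAL, and the two-datanode write pipelines) is driven by a handful of settings; the logged roll size is consistent with the block size times the usual 0.5 roll multiplier. Below is a minimal sketch of those settings, with key names assumed from HBase 2.x and worth confirming against hbase-default.xml for your release.

// Sketch of the WAL-related settings reflected in the log above; key names are
// assumptions from HBase 2.x, not taken from this test's actual configuration.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalSettingsSketch {
  public static Configuration walTuning() {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.wal.provider", "asyncfs");                              // AsyncFSWALProvider
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);  // 256 MB block size
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);           // roll at ~128 MB
    conf.setInt("hbase.regionserver.maxlogs", 32);                          // matches maxLogs=32
    return conf;
  }
}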
2024-11-08T00:48:28,702 DEBUG [RS_OPEN_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-08T00:48:28,702 DEBUG [RS_OPEN_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:48:28,702 DEBUG [RS_OPEN_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-08T00:48:28,702 DEBUG [RS_OPEN_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-08T00:48:28,705 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-08T00:48:28,706 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-08T00:48:28,706 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:48:28,707 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:48:28,707 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-08T00:48:28,709 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-08T00:48:28,709 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:48:28,710 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:48:28,710 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-08T00:48:28,711 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-08T00:48:28,711 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:48:28,712 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:48:28,712 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-08T00:48:28,713 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-08T00:48:28,713 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:48:28,714 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
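Annotation: the StoreOpener entries above are the meta region's column families (info, ns, rep_barrier, table) being opened with ROW_INDEX_V1 block encoding, ROWCOL bloom filters and in-memory caching, each backed by the DefaultStoreFileTracker. For a user table, the same kind of per-family layout is declared through the public descriptor builders; in the small sketch below the "demo" table and "info" family names are illustrative, not taken from the log.

// Sketch: declaring a column family with settings similar to those logged for
// hbase:meta above. Table/family names here are made up for illustration.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeFamilySketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // encoding=ROW_INDEX_V1
              .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
              .setInMemory(true)                                    // IN_MEMORY => 'true'
              .setBlocksize(8192)                                   // BLOCKSIZE => '8192 B (8KB)'
              .setMaxVersions(3)                                    // VERSIONS => '3'
              .build())
          .build());
    }
  }
}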
2024-11-08T00:48:28,714 DEBUG [RS_OPEN_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-08T00:48:28,716 DEBUG [RS_OPEN_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/meta/1588230740 2024-11-08T00:48:28,718 DEBUG [RS_OPEN_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/meta/1588230740 2024-11-08T00:48:28,721 DEBUG [RS_OPEN_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-08T00:48:28,721 DEBUG [RS_OPEN_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-08T00:48:28,722 DEBUG [RS_OPEN_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-08T00:48:28,724 DEBUG [RS_OPEN_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-08T00:48:28,726 INFO [RS_OPEN_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64399453, jitterRate=-0.04037337005138397}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-08T00:48:28,726 DEBUG [RS_OPEN_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-08T00:48:28,728 DEBUG [RS_OPEN_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731026908703Writing region info on filesystem at 1731026908703Initializing all the Stores at 1731026908705 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026908705Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026908705Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026908705Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026908705Cleaning up temporary data from old regions at 1731026908721 (+16 ms)Running coprocessor post-open hooks at 1731026908726 (+5 ms)Region opened successfully at 1731026908727 (+1 ms) 2024-11-08T00:48:28,735 INFO [RS_OPEN_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731026908639 2024-11-08T00:48:28,746 DEBUG [RS_OPEN_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-08T00:48:28,746 INFO [RS_OPEN_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-08T00:48:28,748 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=c302dddf724e,44401,1731026906683 2024-11-08T00:48:28,750 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c302dddf724e,44401,1731026906683, state=OPEN 2024-11-08T00:48:28,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36935-0x100f50c52a90002, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T00:48:28,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46149-0x100f50c52a90000, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T00:48:28,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44401-0x100f50c52a90001, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T00:48:28,753 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:48:28,753 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:48:28,753 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:48:28,753 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c302dddf724e,44401,1731026906683 2024-11-08T00:48:28,758 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-08T00:48:28,758 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c302dddf724e,44401,1731026906683 in 281 
msec 2024-11-08T00:48:28,764 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-08T00:48:28,765 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 497 msec 2024-11-08T00:48:28,766 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T00:48:28,766 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-08T00:48:28,787 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-08T00:48:28,788 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c302dddf724e,44401,1731026906683, seqNum=-1] 2024-11-08T00:48:28,810 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-08T00:48:28,813 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38035, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-08T00:48:28,836 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1470 sec 2024-11-08T00:48:28,836 INFO [master/c302dddf724e:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731026908836, completionTime=-1 2024-11-08T00:48:28,840 INFO [master/c302dddf724e:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=2; waited=0ms, expected min=2 server(s), max=2 server(s), master is running 2024-11-08T00:48:28,840 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-08T00:48:28,873 INFO [master/c302dddf724e:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=2 2024-11-08T00:48:28,873 INFO [master/c302dddf724e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731026968873 2024-11-08T00:48:28,873 INFO [master/c302dddf724e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731027028873 2024-11-08T00:48:28,873 INFO [master/c302dddf724e:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 33 msec 2024-11-08T00:48:28,875 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-08T00:48:28,884 INFO [master/c302dddf724e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c302dddf724e,46149,1731026905945-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 
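Annotation: by this point pid=1 (InitMetaProcedure) and its child procedures have finished: hbase:meta is OPEN on c302dddf724e,44401,1731026906683, its location has been written to the /hbase/meta-region-server znode, and the ConnectionUtils entries above show the master's own client fetching that location from the registry. Any client can perform the same lookup through the public API; a minimal sketch:

// Sketch: asking the cluster where hbase:meta is currently hosted, mirroring
// the "fetched meta region location" entries in the log above.
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaLocationSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
      // e.g. "c302dddf724e,44401,1731026906683" in the run logged above.
      System.out.println("hbase:meta is on " + loc.getServerName());
    }
  }
}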
2024-11-08T00:48:28,884 INFO [master/c302dddf724e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c302dddf724e,46149,1731026905945-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:28,885 INFO [master/c302dddf724e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c302dddf724e,46149,1731026905945-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:28,886 INFO [master/c302dddf724e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c302dddf724e:46149, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:28,887 INFO [master/c302dddf724e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:28,888 INFO [master/c302dddf724e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:28,894 DEBUG [master/c302dddf724e:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-08T00:48:28,920 INFO [master/c302dddf724e:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.053sec 2024-11-08T00:48:28,922 INFO [master/c302dddf724e:0:becomeActiveMaster {}] quotas.MasterQuotaManager(103): Quota table not found. Creating... 2024-11-08T00:48:28,924 INFO [master/c302dddf724e:0:becomeActiveMaster {}] master.HMaster(2490): Client=null/null create 'hbase:quota', {NAME => 'q', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'u', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-08T00:48:28,932 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:quota 2024-11-08T00:48:28,933 INFO [master/c302dddf724e:0:becomeActiveMaster {}] quotas.MasterQuotaManager(107): Initializing quota support 2024-11-08T00:48:28,935 INFO [master/c302dddf724e:0:becomeActiveMaster {}] namespace.NamespaceStateManager(59): Namespace State Manager started. 2024-11-08T00:48:28,935 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:quota execute state=CREATE_TABLE_PRE_OPERATION 2024-11-08T00:48:28,936 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:48:28,938 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:quota execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-08T00:48:28,951 INFO [master/c302dddf724e:0:becomeActiveMaster {}] namespace.NamespaceStateManager(222): Finished updating state of 2 namespaces. 
2024-11-08T00:48:28,951 INFO [master/c302dddf724e:0:becomeActiveMaster {}] namespace.NamespaceAuditor(50): NamespaceAuditor started. 2024-11-08T00:48:28,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44001 is added to blk_1073741836_1012 (size=624) 2024-11-08T00:48:28,953 INFO [master/c302dddf724e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=QuotaObserverChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:28,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33807 is added to blk_1073741836_1012 (size=624) 2024-11-08T00:48:28,953 INFO [master/c302dddf724e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=QuotaObserverChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:28,955 INFO [master/c302dddf724e:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-08T00:48:28,956 INFO [master/c302dddf724e:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-08T00:48:28,957 INFO [master/c302dddf724e:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-08T00:48:28,957 INFO [master/c302dddf724e:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-08T00:48:28,958 INFO [master/c302dddf724e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c302dddf724e,46149,1731026905945-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T00:48:28,959 INFO [master/c302dddf724e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c302dddf724e,46149,1731026905945-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-08T00:48:28,960 INFO [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 2f57c4e8f93fd2814d7a8ce1d4ced5a1, NAME => 'hbase:quota,,1731026908923.2f57c4e8f93fd2814d7a8ce1d4ced5a1.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:quota', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'q', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'u', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4 2024-11-08T00:48:28,964 DEBUG [master/c302dddf724e:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-08T00:48:28,965 INFO [master/c302dddf724e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 
2024-11-08T00:48:28,966 INFO [master/c302dddf724e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c302dddf724e,46149,1731026905945-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:48:28,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33807 is added to blk_1073741837_1013 (size=38) 2024-11-08T00:48:28,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44001 is added to blk_1073741837_1013 (size=38) 2024-11-08T00:48:28,972 DEBUG [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:quota,,1731026908923.2f57c4e8f93fd2814d7a8ce1d4ced5a1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:48:28,972 DEBUG [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1722): Closing 2f57c4e8f93fd2814d7a8ce1d4ced5a1, disabling compactions & flushes 2024-11-08T00:48:28,972 INFO [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:quota,,1731026908923.2f57c4e8f93fd2814d7a8ce1d4ced5a1. 2024-11-08T00:48:28,972 DEBUG [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:quota,,1731026908923.2f57c4e8f93fd2814d7a8ce1d4ced5a1. 2024-11-08T00:48:28,972 DEBUG [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:quota,,1731026908923.2f57c4e8f93fd2814d7a8ce1d4ced5a1. after waiting 0 ms 2024-11-08T00:48:28,972 DEBUG [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:quota,,1731026908923.2f57c4e8f93fd2814d7a8ce1d4ced5a1. 2024-11-08T00:48:28,972 INFO [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1973): Closed hbase:quota,,1731026908923.2f57c4e8f93fd2814d7a8ce1d4ced5a1. 2024-11-08T00:48:28,972 DEBUG [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1676): Region close journal for 2f57c4e8f93fd2814d7a8ce1d4ced5a1: Waiting for close lock at 1731026908972Disabling compacts and flushes for region at 1731026908972Disabling writes for close at 1731026908972Writing region close event to WAL at 1731026908972Closed at 1731026908972 2024-11-08T00:48:28,976 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:quota execute state=CREATE_TABLE_ADD_TO_META 2024-11-08T00:48:28,982 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:quota,,1731026908923.2f57c4e8f93fd2814d7a8ce1d4ced5a1.","families":{"info":[{"qualifier":"regioninfo","vlen":37,"tag":[],"timestamp":"1731026908977"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731026908977"}]},"ts":"1731026908977"} 2024-11-08T00:48:28,989 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
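Once CREATE_TABLE_ADD_TO_META has written the info:regioninfo and info:state cells shown in the Put above, the new region is visible to clients. A hedged sketch of listing it with the standard Admin API (connection setup reduced to a local configuration; this is not code from the test):

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;

    public class ListQuotaRegionsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // After the procedure above completes this returns the single
          // hbase:quota region (encoded name 2f57c4e8f93fd2814d7a8ce1d4ced5a1 here).
          List<RegionInfo> regions = admin.getRegions(TableName.valueOf("hbase", "quota"));
          regions.forEach(r -> System.out.println(r.getRegionNameAsString()));
        }
      }
    }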
2024-11-08T00:48:28,990 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:quota execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-08T00:48:28,994 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:quota","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731026908991"}]},"ts":"1731026908991"} 2024-11-08T00:48:29,022 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:quota, state=ENABLING in hbase:meta 2024-11-08T00:48:29,022 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {c302dddf724e=0} racks are {/default-rack=0} 2024-11-08T00:48:29,025 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-08T00:48:29,025 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-08T00:48:29,025 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-08T00:48:29,025 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-08T00:48:29,025 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-08T00:48:29,025 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-08T00:48:29,025 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-08T00:48:29,027 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:quota, region=2f57c4e8f93fd2814d7a8ce1d4ced5a1, ASSIGN}] 2024-11-08T00:48:29,032 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:quota, region=2f57c4e8f93fd2814d7a8ce1d4ced5a1, ASSIGN 2024-11-08T00:48:29,034 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:quota, region=2f57c4e8f93fd2814d7a8ce1d4ced5a1, ASSIGN; state=OFFLINE, location=c302dddf724e,44401,1731026906683; forceNewPlan=false, retain=false 2024-11-08T00:48:29,038 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c7acb8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T00:48:29,043 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-08T00:48:29,043 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-08T00:48:29,079 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c302dddf724e,46149,-1 for getting cluster id 2024-11-08T00:48:29,081 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-08T00:48:29,090 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0de92a3d-246f-491f-8d40-20aabd3b8c65' 2024-11-08T00:48:29,093 DEBUG [RPCClient-NioEventLoopGroup-5-1 {}] 
ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-08T00:48:29,093 DEBUG [RPCClient-NioEventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0de92a3d-246f-491f-8d40-20aabd3b8c65" 2024-11-08T00:48:29,095 DEBUG [RPCClient-NioEventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28377c62, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T00:48:29,095 DEBUG [RPCClient-NioEventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c302dddf724e,46149,-1] 2024-11-08T00:48:29,098 DEBUG [RPCClient-NioEventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-08T00:48:29,099 DEBUG [RPCClient-NioEventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:48:29,100 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59760, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-08T00:48:29,103 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@554c7a90, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T00:48:29,103 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-08T00:48:29,110 DEBUG [RPCClient-NioEventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c302dddf724e,44401,1731026906683, seqNum=-1] 2024-11-08T00:48:29,110 DEBUG [RPCClient-NioEventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-08T00:48:29,112 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32804, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-08T00:48:29,132 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c302dddf724e,46149,1731026905945 2024-11-08T00:48:29,133 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T00:48:29,187 INFO [c302dddf724e:46149 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
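The ClusterIdFetcher/ConnectionUtils lines above show the client side of connection setup: fetch the cluster id from the connection registry on the master, then fetch the hbase:meta region location, then open a ClientService connection to the hosting region server. From application code that whole exchange is hidden behind ConnectionFactory; a minimal sketch, assuming a configuration that points at this mini cluster:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaLocationSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator meta = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          // Equivalent to the "Start fetching meta region location from registry"
          // step logged above; prints something like
          // region=hbase:meta,,1.1588230740, hostname=..., seqNum=-1
          HRegionLocation loc = meta.getRegionLocation(HConstants.EMPTY_START_ROW);
          System.out.println(loc);
        }
      }
    }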
2024-11-08T00:48:29,187 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=2f57c4e8f93fd2814d7a8ce1d4ced5a1, regionState=OPENING, regionLocation=c302dddf724e,44401,1731026906683 2024-11-08T00:48:29,192 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:quota, region=2f57c4e8f93fd2814d7a8ce1d4ced5a1, ASSIGN because future has completed 2024-11-08T00:48:29,193 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2f57c4e8f93fd2814d7a8ce1d4ced5a1, server=c302dddf724e,44401,1731026906683}] 2024-11-08T00:48:29,353 INFO [RS_OPEN_PRIORITY_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(132): Open hbase:quota,,1731026908923.2f57c4e8f93fd2814d7a8ce1d4ced5a1. 2024-11-08T00:48:29,353 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 2f57c4e8f93fd2814d7a8ce1d4ced5a1, NAME => 'hbase:quota,,1731026908923.2f57c4e8f93fd2814d7a8ce1d4ced5a1.', STARTKEY => '', ENDKEY => ''} 2024-11-08T00:48:29,354 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table quota 2f57c4e8f93fd2814d7a8ce1d4ced5a1 2024-11-08T00:48:29,354 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(898): Instantiated hbase:quota,,1731026908923.2f57c4e8f93fd2814d7a8ce1d4ced5a1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:48:29,354 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 2f57c4e8f93fd2814d7a8ce1d4ced5a1 2024-11-08T00:48:29,354 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 2f57c4e8f93fd2814d7a8ce1d4ced5a1 2024-11-08T00:48:29,356 INFO [StoreOpener-2f57c4e8f93fd2814d7a8ce1d4ced5a1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family q of region 2f57c4e8f93fd2814d7a8ce1d4ced5a1 2024-11-08T00:48:29,359 INFO [StoreOpener-2f57c4e8f93fd2814d7a8ce1d4ced5a1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2f57c4e8f93fd2814d7a8ce1d4ced5a1 
columnFamilyName q 2024-11-08T00:48:29,359 DEBUG [StoreOpener-2f57c4e8f93fd2814d7a8ce1d4ced5a1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:48:29,360 INFO [StoreOpener-2f57c4e8f93fd2814d7a8ce1d4ced5a1-1 {}] regionserver.HStore(327): Store=2f57c4e8f93fd2814d7a8ce1d4ced5a1/q, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:48:29,360 INFO [StoreOpener-2f57c4e8f93fd2814d7a8ce1d4ced5a1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family u of region 2f57c4e8f93fd2814d7a8ce1d4ced5a1 2024-11-08T00:48:29,362 INFO [StoreOpener-2f57c4e8f93fd2814d7a8ce1d4ced5a1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2f57c4e8f93fd2814d7a8ce1d4ced5a1 columnFamilyName u 2024-11-08T00:48:29,362 DEBUG [StoreOpener-2f57c4e8f93fd2814d7a8ce1d4ced5a1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:48:29,363 INFO [StoreOpener-2f57c4e8f93fd2814d7a8ce1d4ced5a1-1 {}] regionserver.HStore(327): Store=2f57c4e8f93fd2814d7a8ce1d4ced5a1/u, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:48:29,363 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 2f57c4e8f93fd2814d7a8ce1d4ced5a1 2024-11-08T00:48:29,365 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/quota/2f57c4e8f93fd2814d7a8ce1d4ced5a1 2024-11-08T00:48:29,366 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/quota/2f57c4e8f93fd2814d7a8ce1d4ced5a1 2024-11-08T00:48:29,367 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 2f57c4e8f93fd2814d7a8ce1d4ced5a1 2024-11-08T00:48:29,367 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 2f57c4e8f93fd2814d7a8ce1d4ced5a1 
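The StoreFileTrackerFactory lines above show each store resolving 'hbase.store.file-tracker.impl' => 'DEFAULT' from the table descriptor metadata printed during region creation. If an operator wanted a table to carry that attribute explicitly, it could be set through the descriptor builder; a sketch, with the table name and family chosen only for illustration:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class StoreFileTrackerSketch {
      public static TableDescriptor withDefaultTracker() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("ExampleTable"))
            // Same key/value pair that appears under TABLE_ATTRIBUTES => METADATA
            // in the creating/opening log lines above.
            .setValue("hbase.store.file-tracker.impl", "DEFAULT")
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            .build();
      }
    }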
2024-11-08T00:48:29,368 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:quota descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 2024-11-08T00:48:29,370 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 2f57c4e8f93fd2814d7a8ce1d4ced5a1 2024-11-08T00:48:29,375 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/quota/2f57c4e8f93fd2814d7a8ce1d4ced5a1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T00:48:29,376 INFO [RS_OPEN_PRIORITY_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1114): Opened 2f57c4e8f93fd2814d7a8ce1d4ced5a1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75066068, jitterRate=0.11857157945632935}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-11-08T00:48:29,376 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2f57c4e8f93fd2814d7a8ce1d4ced5a1 2024-11-08T00:48:29,377 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 2f57c4e8f93fd2814d7a8ce1d4ced5a1: Running coprocessor pre-open hook at 1731026909354Writing region info on filesystem at 1731026909354Initializing all the Stores at 1731026909356 (+2 ms)Instantiating store for column family {NAME => 'q', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026909356Instantiating store for column family {NAME => 'u', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026909356Cleaning up temporary data from old regions at 1731026909367 (+11 ms)Running coprocessor post-open hooks at 1731026909376 (+9 ms)Region opened successfully at 1731026909377 (+1 ms) 2024-11-08T00:48:29,379 INFO [RS_OPEN_PRIORITY_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:quota,,1731026908923.2f57c4e8f93fd2814d7a8ce1d4ced5a1., pid=6, masterSystemTime=1731026909347 2024-11-08T00:48:29,384 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:quota,,1731026908923.2f57c4e8f93fd2814d7a8ce1d4ced5a1. 
2024-11-08T00:48:29,384 INFO [RS_OPEN_PRIORITY_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(153): Opened hbase:quota,,1731026908923.2f57c4e8f93fd2814d7a8ce1d4ced5a1. 2024-11-08T00:48:29,386 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=2f57c4e8f93fd2814d7a8ce1d4ced5a1, regionState=OPEN, openSeqNum=2, regionLocation=c302dddf724e,44401,1731026906683 2024-11-08T00:48:29,390 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2f57c4e8f93fd2814d7a8ce1d4ced5a1, server=c302dddf724e,44401,1731026906683 because future has completed 2024-11-08T00:48:29,396 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-08T00:48:29,396 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 2f57c4e8f93fd2814d7a8ce1d4ced5a1, server=c302dddf724e,44401,1731026906683 in 199 msec 2024-11-08T00:48:29,402 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-08T00:48:29,402 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:quota, region=2f57c4e8f93fd2814d7a8ce1d4ced5a1, ASSIGN in 369 msec 2024-11-08T00:48:29,403 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:quota execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-08T00:48:29,403 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:quota","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731026909403"}]},"ts":"1731026909403"} 2024-11-08T00:48:29,407 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:quota, state=ENABLED in hbase:meta 2024-11-08T00:48:29,408 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:quota execute state=CREATE_TABLE_POST_OPERATION 2024-11-08T00:48:29,413 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:quota in 483 msec 2024-11-08T00:48:29,449 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=hbase:quota,, stopping at row=hbase:quota ,, for max=2147483647 with caching=100 2024-11-08T00:48:29,458 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-08T00:48:29,462 DEBUG [RPCClient-NioEventLoopGroup-5-2 {}] client.AsyncConnectionImpl(321): The fetched master address is c302dddf724e,46149,1731026905945 2024-11-08T00:48:29,466 DEBUG [RPCClient-NioEventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@73998704 2024-11-08T00:48:29,469 DEBUG [RPCClient-NioEventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-08T00:48:29,472 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59768, version=4.0.0-alpha-1-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-08T00:48:29,475 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=hbase:quota,, stopping at row=hbase:quota ,, for max=2147483647 with caching=100 2024-11-08T00:48:29,491 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46149 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestQuotaAdmin0', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-08T00:48:29,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46149 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestQuotaAdmin0 2024-11-08T00:48:29,497 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestQuotaAdmin0 execute state=CREATE_TABLE_PRE_OPERATION 2024-11-08T00:48:29,498 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:48:29,499 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46149 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestQuotaAdmin0" procId is: 7 2024-11-08T00:48:29,501 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestQuotaAdmin0 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-08T00:48:29,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-08T00:48:29,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44001 is added to blk_1073741838_1014 (size=391) 2024-11-08T00:48:29,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33807 is added to blk_1073741838_1014 (size=391) 2024-11-08T00:48:29,524 INFO [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 9ddb6d45779c6bfeec462c7bd486822f, NAME => 'TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestQuotaAdmin0', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4 2024-11-08T00:48:29,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44001 is added to blk_1073741839_1015 (size=50) 2024-11-08T00:48:29,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:33807 is added to blk_1073741839_1015 (size=50) 2024-11-08T00:48:29,536 DEBUG [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(898): Instantiated TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:48:29,536 DEBUG [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1722): Closing 9ddb6d45779c6bfeec462c7bd486822f, disabling compactions & flushes 2024-11-08T00:48:29,536 INFO [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1755): Closing region TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f. 2024-11-08T00:48:29,536 DEBUG [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f. 2024-11-08T00:48:29,536 DEBUG [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f. after waiting 0 ms 2024-11-08T00:48:29,536 DEBUG [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f. 2024-11-08T00:48:29,536 INFO [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1973): Closed TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f. 2024-11-08T00:48:29,536 DEBUG [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1676): Region close journal for 9ddb6d45779c6bfeec462c7bd486822f: Waiting for close lock at 1731026909536Disabling compacts and flushes for region at 1731026909536Disabling writes for close at 1731026909536Writing region close event to WAL at 1731026909536Closed at 1731026909536 2024-11-08T00:48:29,539 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestQuotaAdmin0 execute state=CREATE_TABLE_ADD_TO_META 2024-11-08T00:48:29,539 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f.","families":{"info":[{"qualifier":"regioninfo","vlen":49,"tag":[],"timestamp":"1731026909539"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731026909539"}]},"ts":"1731026909539"} 2024-11-08T00:48:29,543 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
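The HMaster$4(2454) request above ("create 'TestQuotaAdmin0'", one 'cf' family, REGION_REPLICATION => '1') is what a client-side createTable call looks like from the master's point of view. A hedged reconstruction of the client side using the standard Admin API follows; the connection boilerplate is assumed, and this is not the literal test code.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTestTableSketch {
      public static void main(String[] args) throws Exception {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestQuotaAdmin0"))
            .setRegionReplication(1)                              // REGION_REPLICATION => '1'
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")) // single 'cf' family
            .build();
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Blocks until the CreateTableProcedure (pid=7 above) reports SUCCESS.
          admin.createTable(desc);
        }
      }
    }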
2024-11-08T00:48:29,545 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestQuotaAdmin0 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-08T00:48:29,545 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin0","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731026909545"}]},"ts":"1731026909545"} 2024-11-08T00:48:29,548 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin0, state=ENABLING in hbase:meta 2024-11-08T00:48:29,548 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {c302dddf724e=0} racks are {/default-rack=0} 2024-11-08T00:48:29,550 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-08T00:48:29,550 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-08T00:48:29,550 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-08T00:48:29,550 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-08T00:48:29,550 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-08T00:48:29,550 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-08T00:48:29,550 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-08T00:48:29,550 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin0, region=9ddb6d45779c6bfeec462c7bd486822f, ASSIGN}] 2024-11-08T00:48:29,552 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin0, region=9ddb6d45779c6bfeec462c7bd486822f, ASSIGN 2024-11-08T00:48:29,554 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin0, region=9ddb6d45779c6bfeec462c7bd486822f, ASSIGN; state=OFFLINE, location=c302dddf724e,36935,1731026906786; forceNewPlan=false, retain=false 2024-11-08T00:48:29,704 INFO [c302dddf724e:46149 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-08T00:48:29,705 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=9ddb6d45779c6bfeec462c7bd486822f, regionState=OPENING, regionLocation=c302dddf724e,36935,1731026906786 2024-11-08T00:48:29,708 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin0, region=9ddb6d45779c6bfeec462c7bd486822f, ASSIGN because future has completed 2024-11-08T00:48:29,708 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9ddb6d45779c6bfeec462c7bd486822f, server=c302dddf724e,36935,1731026906786}] 2024-11-08T00:48:29,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-08T00:48:29,862 DEBUG [RSProcedureDispatcher-pool-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-08T00:48:29,864 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48605, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-08T00:48:29,870 INFO [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] handler.AssignRegionHandler(132): Open TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f. 2024-11-08T00:48:29,870 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7752): Opening region: {ENCODED => 9ddb6d45779c6bfeec462c7bd486822f, NAME => 'TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f.', STARTKEY => '', ENDKEY => ''} 2024-11-08T00:48:29,871 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestQuotaAdmin0 9ddb6d45779c6bfeec462c7bd486822f 2024-11-08T00:48:29,871 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(898): Instantiated TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:48:29,871 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7794): checking encryption for 9ddb6d45779c6bfeec462c7bd486822f 2024-11-08T00:48:29,871 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7797): checking classloading for 9ddb6d45779c6bfeec462c7bd486822f 2024-11-08T00:48:29,873 INFO [StoreOpener-9ddb6d45779c6bfeec462c7bd486822f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 9ddb6d45779c6bfeec462c7bd486822f 2024-11-08T00:48:29,875 INFO [StoreOpener-9ddb6d45779c6bfeec462c7bd486822f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9ddb6d45779c6bfeec462c7bd486822f columnFamilyName cf 2024-11-08T00:48:29,875 DEBUG [StoreOpener-9ddb6d45779c6bfeec462c7bd486822f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:48:29,876 INFO [StoreOpener-9ddb6d45779c6bfeec462c7bd486822f-1 {}] regionserver.HStore(327): Store=9ddb6d45779c6bfeec462c7bd486822f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:48:29,876 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1038): replaying wal for 9ddb6d45779c6bfeec462c7bd486822f 2024-11-08T00:48:29,878 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/default/TestQuotaAdmin0/9ddb6d45779c6bfeec462c7bd486822f 2024-11-08T00:48:29,878 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/default/TestQuotaAdmin0/9ddb6d45779c6bfeec462c7bd486822f 2024-11-08T00:48:29,879 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1048): stopping wal replay for 9ddb6d45779c6bfeec462c7bd486822f 2024-11-08T00:48:29,879 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1060): Cleaning up temporary data for 9ddb6d45779c6bfeec462c7bd486822f 2024-11-08T00:48:29,882 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1093): writing seq id for 9ddb6d45779c6bfeec462c7bd486822f 2024-11-08T00:48:29,885 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/default/TestQuotaAdmin0/9ddb6d45779c6bfeec462c7bd486822f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T00:48:29,886 INFO [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1114): Opened 9ddb6d45779c6bfeec462c7bd486822f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68467472, jitterRate=0.020244836807250977}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-08T00:48:29,886 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9ddb6d45779c6bfeec462c7bd486822f 2024-11-08T00:48:29,887 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 
{event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1006): Region open journal for 9ddb6d45779c6bfeec462c7bd486822f: Running coprocessor pre-open hook at 1731026909871Writing region info on filesystem at 1731026909871Initializing all the Stores at 1731026909873 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026909873Cleaning up temporary data from old regions at 1731026909879 (+6 ms)Running coprocessor post-open hooks at 1731026909886 (+7 ms)Region opened successfully at 1731026909887 (+1 ms) 2024-11-08T00:48:29,888 INFO [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegionServer(2236): Post open deploy tasks for TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f., pid=9, masterSystemTime=1731026909862 2024-11-08T00:48:29,891 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegionServer(2266): Finished post open deploy task for TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f. 2024-11-08T00:48:29,892 INFO [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] handler.AssignRegionHandler(153): Opened TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f. 2024-11-08T00:48:29,893 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=9ddb6d45779c6bfeec462c7bd486822f, regionState=OPEN, openSeqNum=2, regionLocation=c302dddf724e,36935,1731026906786 2024-11-08T00:48:29,896 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9ddb6d45779c6bfeec462c7bd486822f, server=c302dddf724e,36935,1731026906786 because future has completed 2024-11-08T00:48:29,901 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-08T00:48:29,901 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; OpenRegionProcedure 9ddb6d45779c6bfeec462c7bd486822f, server=c302dddf724e,36935,1731026906786 in 190 msec 2024-11-08T00:48:29,905 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-08T00:48:29,905 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin0, region=9ddb6d45779c6bfeec462c7bd486822f, ASSIGN in 351 msec 2024-11-08T00:48:29,906 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestQuotaAdmin0 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-08T00:48:29,906 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin0","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731026909906"}]},"ts":"1731026909906"} 2024-11-08T00:48:29,909 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin0, state=ENABLED in hbase:meta 2024-11-08T00:48:29,910 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): 
pid=7, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestQuotaAdmin0 execute state=CREATE_TABLE_POST_OPERATION 2024-11-08T00:48:29,913 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestQuotaAdmin0 in 418 msec 2024-11-08T00:48:30,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-08T00:48:30,277 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestQuotaAdmin0 completed 2024-11-08T00:48:30,278 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestQuotaAdmin0 get assigned. Timeout = 60000ms 2024-11-08T00:48:30,278 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T00:48:30,283 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestQuotaAdmin0 assigned to meta. Checking AM states. 2024-11-08T00:48:30,283 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T00:48:30,284 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestQuotaAdmin0 assigned. 2024-11-08T00:48:30,284 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T00:48:30,286 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestQuotaAdmin0,, stopping at row=TestQuotaAdmin0 ,, for max=2147483647 with caching=100 2024-11-08T00:48:30,290 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestQuotaAdmin0,, stopping at row=TestQuotaAdmin0 ,, for max=2147483647 with caching=100 2024-11-08T00:48:30,293 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-08T00:48:30,295 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34464, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-08T00:48:30,331 DEBUG [regionserver/c302dddf724e:0.Chore.1 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-08T00:48:30,331 DEBUG [regionserver/c302dddf724e:0.Chore.1 {}] client.AsyncConnectionImpl(321): The fetched master address is c302dddf724e,46149,1731026905945 2024-11-08T00:48:30,331 DEBUG [regionserver/c302dddf724e:0.Chore.1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@61624134 2024-11-08T00:48:30,334 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46149 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestQuotaAdmin1', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-08T00:48:30,335 DEBUG [regionserver/c302dddf724e:0.Chore.1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 
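The HBaseTestingUtil(3046/3100/3120) lines above are the test harness waiting for every region of TestQuotaAdmin0 to be assigned before it proceeds. Assuming the HBaseTestingUtil API referenced in those lines, the test-side call is roughly the following sketch; the wrapper class is invented for illustration.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.TableName;

    public class WaitForAssignmentSketch {
      public static void waitForTable(HBaseTestingUtil util) throws Exception {
        // Same 60s timeout that appears in the
        // "Waiting until all regions ... Timeout = 60000ms" line above.
        util.waitUntilAllRegionsAssigned(TableName.valueOf("TestQuotaAdmin0"), 60_000);
      }
    }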
2024-11-08T00:48:30,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46149 {}] procedure2.ProcedureExecutor(1139): Stored pid=10, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestQuotaAdmin1 2024-11-08T00:48:30,337 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35133, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=MasterService 2024-11-08T00:48:30,338 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=10, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestQuotaAdmin1 execute state=CREATE_TABLE_PRE_OPERATION 2024-11-08T00:48:30,338 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:48:30,338 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46149 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestQuotaAdmin1" procId is: 10 2024-11-08T00:48:30,339 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=10, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestQuotaAdmin1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-08T00:48:30,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=10 2024-11-08T00:48:30,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44001 is added to blk_1073741840_1016 (size=391) 2024-11-08T00:48:30,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33807 is added to blk_1073741840_1016 (size=391) 2024-11-08T00:48:30,355 INFO [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0fa836ad0e77c5a1d34a9799d89291a2, NAME => 'TestQuotaAdmin1,,1731026910333.0fa836ad0e77c5a1d34a9799d89291a2.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestQuotaAdmin1', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4 2024-11-08T00:48:30,362 DEBUG [regionserver/c302dddf724e:0.Chore.1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-08T00:48:30,362 DEBUG [regionserver/c302dddf724e:0.Chore.1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c302dddf724e,44401,1731026906683, seqNum=-1] 2024-11-08T00:48:30,362 DEBUG [regionserver/c302dddf724e:0.Chore.1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-08T00:48:30,364 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43799, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=ClientService 2024-11-08T00:48:30,365 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44001 is added to blk_1073741841_1017 (size=50) 2024-11-08T00:48:30,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33807 is added to blk_1073741841_1017 (size=50) 2024-11-08T00:48:30,369 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:quota', row='n.default', locateType=CURRENT is [region=hbase:quota,,1731026908923.2f57c4e8f93fd2814d7a8ce1d4ced5a1., hostname=c302dddf724e,44401,1731026906683, seqNum=2] 2024-11-08T00:48:30,372 DEBUG [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(898): Instantiated TestQuotaAdmin1,,1731026910333.0fa836ad0e77c5a1d34a9799d89291a2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:48:30,373 DEBUG [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1722): Closing 0fa836ad0e77c5a1d34a9799d89291a2, disabling compactions & flushes 2024-11-08T00:48:30,373 INFO [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1755): Closing region TestQuotaAdmin1,,1731026910333.0fa836ad0e77c5a1d34a9799d89291a2. 2024-11-08T00:48:30,373 DEBUG [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestQuotaAdmin1,,1731026910333.0fa836ad0e77c5a1d34a9799d89291a2. 2024-11-08T00:48:30,373 DEBUG [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestQuotaAdmin1,,1731026910333.0fa836ad0e77c5a1d34a9799d89291a2. after waiting 0 ms 2024-11-08T00:48:30,373 DEBUG [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestQuotaAdmin1,,1731026910333.0fa836ad0e77c5a1d34a9799d89291a2. 2024-11-08T00:48:30,373 INFO [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1973): Closed TestQuotaAdmin1,,1731026910333.0fa836ad0e77c5a1d34a9799d89291a2. 2024-11-08T00:48:30,373 DEBUG [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0fa836ad0e77c5a1d34a9799d89291a2: Waiting for close lock at 1731026910373Disabling compacts and flushes for region at 1731026910373Disabling writes for close at 1731026910373Writing region close event to WAL at 1731026910373Closed at 1731026910373 2024-11-08T00:48:30,386 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=10, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestQuotaAdmin1 execute state=CREATE_TABLE_ADD_TO_META 2024-11-08T00:48:30,387 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestQuotaAdmin1,,1731026910333.0fa836ad0e77c5a1d34a9799d89291a2.","families":{"info":[{"qualifier":"regioninfo","vlen":49,"tag":[],"timestamp":"1731026910386"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731026910386"}]},"ts":"1731026910386"} 2024-11-08T00:48:30,390 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
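The AsyncNonMetaRegionLocator line above resolves the 'n.default' row in hbase:quota, i.e. the namespace quota entry for the default namespace; a TestQuotaAdmin-style test ultimately writes such rows through the quota admin API. A hedged sketch of that API follows; the throttle limits and targets are examples only, not values from this run.

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
    import org.apache.hadoop.hbase.quotas.ThrottleType;

    public class SetQuotaSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Throttle requests against TestQuotaAdmin0; stored as a row in hbase:quota.
          admin.setQuota(QuotaSettingsFactory.throttleTable(
              TableName.valueOf("TestQuotaAdmin0"), ThrottleType.REQUEST_NUMBER,
              100, TimeUnit.MINUTES));
          // Namespace-level throttle; this is the kind of entry the 'n.default'
          // lookup above would find.
          admin.setQuota(QuotaSettingsFactory.throttleNamespace(
              "default", ThrottleType.REQUEST_NUMBER, 100, TimeUnit.MINUTES));
        }
      }
    }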
2024-11-08T00:48:30,392 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=10, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestQuotaAdmin1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-08T00:48:30,392 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731026910392"}]},"ts":"1731026910392"} 2024-11-08T00:48:30,395 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin1, state=ENABLING in hbase:meta 2024-11-08T00:48:30,395 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {c302dddf724e=0} racks are {/default-rack=0} 2024-11-08T00:48:30,396 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-08T00:48:30,396 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-08T00:48:30,396 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-08T00:48:30,396 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-08T00:48:30,396 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-08T00:48:30,396 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-08T00:48:30,396 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-08T00:48:30,397 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin1, region=0fa836ad0e77c5a1d34a9799d89291a2, ASSIGN}] 2024-11-08T00:48:30,400 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=10, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin1, region=0fa836ad0e77c5a1d34a9799d89291a2, ASSIGN 2024-11-08T00:48:30,401 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=10, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin1, region=0fa836ad0e77c5a1d34a9799d89291a2, ASSIGN; state=OFFLINE, location=c302dddf724e,44401,1731026906683; forceNewPlan=false, retain=false 2024-11-08T00:48:30,552 INFO [c302dddf724e:46149 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
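The BalancerClusterState / BaseLoadBalancer lines above decide where the single TestQuotaAdmin1 region goes (round-robin across the two region servers, which sit on one host and one rack). Where a region actually landed can be checked from a client with a RegionLocator; a short sketch with the connection boilerplate assumed:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class RegionPlacementSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(TableName.valueOf("TestQuotaAdmin1"))) {
          // Prints region name plus the hostname,port,startcode triple, the same
          // "regionLocation=c302dddf724e,44401,..." form seen in the log above.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getRegionNameAsString() + " -> " + loc.getServerName());
          }
        }
      }
    }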
2024-11-08T00:48:30,552 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=0fa836ad0e77c5a1d34a9799d89291a2, regionState=OPENING, regionLocation=c302dddf724e,44401,1731026906683 2024-11-08T00:48:30,555 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=10, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin1, region=0fa836ad0e77c5a1d34a9799d89291a2, ASSIGN because future has completed 2024-11-08T00:48:30,556 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0fa836ad0e77c5a1d34a9799d89291a2, server=c302dddf724e,44401,1731026906683}] 2024-11-08T00:48:30,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=10 2024-11-08T00:48:30,714 INFO [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestQuotaAdmin1,,1731026910333.0fa836ad0e77c5a1d34a9799d89291a2. 2024-11-08T00:48:30,715 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 0fa836ad0e77c5a1d34a9799d89291a2, NAME => 'TestQuotaAdmin1,,1731026910333.0fa836ad0e77c5a1d34a9799d89291a2.', STARTKEY => '', ENDKEY => ''} 2024-11-08T00:48:30,715 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestQuotaAdmin1 0fa836ad0e77c5a1d34a9799d89291a2 2024-11-08T00:48:30,715 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestQuotaAdmin1,,1731026910333.0fa836ad0e77c5a1d34a9799d89291a2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:48:30,715 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 0fa836ad0e77c5a1d34a9799d89291a2 2024-11-08T00:48:30,715 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 0fa836ad0e77c5a1d34a9799d89291a2 2024-11-08T00:48:30,718 INFO [StoreOpener-0fa836ad0e77c5a1d34a9799d89291a2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0fa836ad0e77c5a1d34a9799d89291a2 2024-11-08T00:48:30,721 INFO [StoreOpener-0fa836ad0e77c5a1d34a9799d89291a2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0fa836ad0e77c5a1d34a9799d89291a2 columnFamilyName cf 2024-11-08T00:48:30,721 DEBUG [StoreOpener-0fa836ad0e77c5a1d34a9799d89291a2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:48:30,722 INFO [StoreOpener-0fa836ad0e77c5a1d34a9799d89291a2-1 {}] regionserver.HStore(327): Store=0fa836ad0e77c5a1d34a9799d89291a2/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:48:30,722 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 0fa836ad0e77c5a1d34a9799d89291a2 2024-11-08T00:48:30,723 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/default/TestQuotaAdmin1/0fa836ad0e77c5a1d34a9799d89291a2 2024-11-08T00:48:30,724 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/default/TestQuotaAdmin1/0fa836ad0e77c5a1d34a9799d89291a2 2024-11-08T00:48:30,724 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 0fa836ad0e77c5a1d34a9799d89291a2 2024-11-08T00:48:30,724 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 0fa836ad0e77c5a1d34a9799d89291a2 2024-11-08T00:48:30,727 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 0fa836ad0e77c5a1d34a9799d89291a2 2024-11-08T00:48:30,731 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/default/TestQuotaAdmin1/0fa836ad0e77c5a1d34a9799d89291a2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T00:48:30,731 INFO [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 0fa836ad0e77c5a1d34a9799d89291a2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67430867, jitterRate=0.004798218607902527}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-08T00:48:30,731 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0fa836ad0e77c5a1d34a9799d89291a2 2024-11-08T00:48:30,732 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 0fa836ad0e77c5a1d34a9799d89291a2: Running coprocessor pre-open hook at 1731026910716Writing region info on filesystem at 1731026910716Initializing all the Stores at 1731026910717 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 
'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026910717Cleaning up temporary data from old regions at 1731026910724 (+7 ms)Running coprocessor post-open hooks at 1731026910731 (+7 ms)Region opened successfully at 1731026910732 (+1 ms) 2024-11-08T00:48:30,734 INFO [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestQuotaAdmin1,,1731026910333.0fa836ad0e77c5a1d34a9799d89291a2., pid=12, masterSystemTime=1731026910709 2024-11-08T00:48:30,737 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestQuotaAdmin1,,1731026910333.0fa836ad0e77c5a1d34a9799d89291a2. 2024-11-08T00:48:30,737 INFO [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestQuotaAdmin1,,1731026910333.0fa836ad0e77c5a1d34a9799d89291a2. 2024-11-08T00:48:30,738 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=0fa836ad0e77c5a1d34a9799d89291a2, regionState=OPEN, openSeqNum=2, regionLocation=c302dddf724e,44401,1731026906683 2024-11-08T00:48:30,741 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0fa836ad0e77c5a1d34a9799d89291a2, server=c302dddf724e,44401,1731026906683 because future has completed 2024-11-08T00:48:30,746 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-08T00:48:30,746 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 0fa836ad0e77c5a1d34a9799d89291a2, server=c302dddf724e,44401,1731026906683 in 187 msec 2024-11-08T00:48:30,749 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=10 2024-11-08T00:48:30,749 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=10, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin1, region=0fa836ad0e77c5a1d34a9799d89291a2, ASSIGN in 350 msec 2024-11-08T00:48:30,750 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=10, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestQuotaAdmin1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-08T00:48:30,751 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731026910751"}]},"ts":"1731026910751"} 2024-11-08T00:48:30,754 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin1, state=ENABLED in hbase:meta 2024-11-08T00:48:30,755 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=10, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestQuotaAdmin1 execute state=CREATE_TABLE_POST_OPERATION 2024-11-08T00:48:30,758 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestQuotaAdmin1 in 421 msec 
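The pid=10 trace above is the master-side half of a client createTable request; the RawAsyncHBaseAdmin lines that follow are just the client polling until the procedure reports SUCCESS. As a rough illustration only, a table with the descriptor shown in these logs could be created through the standard HBase client API along the following lines; the class name, configuration source and error handling are assumptions made for this sketch and are not taken from the test source.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateQuotaTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Same shape as the logged descriptor: REGION_REPLICATION => '1', one family 'cf' with VERSIONS => '1'.
      TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("TestQuotaAdmin1"))
          .setRegionReplication(1)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .build())
          .build();
      // Blocks until the master's CreateTableProcedure (the pid=10 state machine above) completes.
      admin.createTable(td);
    }
  }
}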
2024-11-08T00:48:31,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=10 2024-11-08T00:48:31,108 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestQuotaAdmin1 completed 2024-11-08T00:48:31,108 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestQuotaAdmin1 get assigned. Timeout = 60000ms 2024-11-08T00:48:31,108 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T00:48:31,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestQuotaAdmin1 assigned to meta. Checking AM states. 2024-11-08T00:48:31,112 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T00:48:31,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestQuotaAdmin1 assigned. 2024-11-08T00:48:31,113 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T00:48:31,114 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestQuotaAdmin1,, stopping at row=TestQuotaAdmin1 ,, for max=2147483647 with caching=100 2024-11-08T00:48:31,120 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestQuotaAdmin1,, stopping at row=TestQuotaAdmin1 ,, for max=2147483647 with caching=100 2024-11-08T00:48:31,128 DEBUG [regionserver/c302dddf724e:0.Chore.1 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-08T00:48:31,128 DEBUG [regionserver/c302dddf724e:0.Chore.1 {}] client.AsyncConnectionImpl(321): The fetched master address is c302dddf724e,46149,1731026905945 2024-11-08T00:48:31,128 DEBUG [regionserver/c302dddf724e:0.Chore.1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@6dfad088 2024-11-08T00:48:31,128 DEBUG [regionserver/c302dddf724e:0.Chore.1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-08T00:48:31,130 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34685, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=MasterService 2024-11-08T00:48:31,131 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46149 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestQuotaAdmin2', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-08T00:48:31,133 DEBUG [regionserver/c302dddf724e:0.Chore.1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-08T00:48:31,133 DEBUG [regionserver/c302dddf724e:0.Chore.1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c302dddf724e,44401,1731026906683, seqNum=-1] 2024-11-08T00:48:31,133 DEBUG [regionserver/c302dddf724e:0.Chore.1 {}] ipc.RpcConnection(159): Using SIMPLE authentication 
for service=ClientService, sasl=false 2024-11-08T00:48:31,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46149 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestQuotaAdmin2 2024-11-08T00:48:31,134 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50383, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-11-08T00:48:31,136 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=13, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestQuotaAdmin2 execute state=CREATE_TABLE_PRE_OPERATION 2024-11-08T00:48:31,137 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:48:31,137 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46149 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestQuotaAdmin2" procId is: 13 2024-11-08T00:48:31,137 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:quota', row='n.default', locateType=CURRENT is [region=hbase:quota,,1731026908923.2f57c4e8f93fd2814d7a8ce1d4ced5a1., hostname=c302dddf724e,44401,1731026906683, seqNum=2] 2024-11-08T00:48:31,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-08T00:48:31,138 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=13, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestQuotaAdmin2 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-08T00:48:31,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44001 is added to blk_1073741842_1018 (size=391) 2024-11-08T00:48:31,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33807 is added to blk_1073741842_1018 (size=391) 2024-11-08T00:48:31,161 INFO [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 5722bdba1189d7abc2c73c96dd29b2f1, NAME => 'TestQuotaAdmin2,,1731026911131.5722bdba1189d7abc2c73c96dd29b2f1.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestQuotaAdmin2', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4 2024-11-08T00:48:31,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44001 is added to blk_1073741843_1019 (size=50) 2024-11-08T00:48:31,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33807 is added to blk_1073741843_1019 (size=50) 2024-11-08T00:48:31,172 DEBUG [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] 
regionserver.HRegion(898): Instantiated TestQuotaAdmin2,,1731026911131.5722bdba1189d7abc2c73c96dd29b2f1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:48:31,172 DEBUG [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1722): Closing 5722bdba1189d7abc2c73c96dd29b2f1, disabling compactions & flushes 2024-11-08T00:48:31,172 INFO [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1755): Closing region TestQuotaAdmin2,,1731026911131.5722bdba1189d7abc2c73c96dd29b2f1. 2024-11-08T00:48:31,172 DEBUG [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestQuotaAdmin2,,1731026911131.5722bdba1189d7abc2c73c96dd29b2f1. 2024-11-08T00:48:31,172 DEBUG [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestQuotaAdmin2,,1731026911131.5722bdba1189d7abc2c73c96dd29b2f1. after waiting 0 ms 2024-11-08T00:48:31,172 DEBUG [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestQuotaAdmin2,,1731026911131.5722bdba1189d7abc2c73c96dd29b2f1. 2024-11-08T00:48:31,172 INFO [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1973): Closed TestQuotaAdmin2,,1731026911131.5722bdba1189d7abc2c73c96dd29b2f1. 2024-11-08T00:48:31,172 DEBUG [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1676): Region close journal for 5722bdba1189d7abc2c73c96dd29b2f1: Waiting for close lock at 1731026911172Disabling compacts and flushes for region at 1731026911172Disabling writes for close at 1731026911172Writing region close event to WAL at 1731026911172Closed at 1731026911172 2024-11-08T00:48:31,174 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=13, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestQuotaAdmin2 execute state=CREATE_TABLE_ADD_TO_META 2024-11-08T00:48:31,174 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestQuotaAdmin2,,1731026911131.5722bdba1189d7abc2c73c96dd29b2f1.","families":{"info":[{"qualifier":"regioninfo","vlen":49,"tag":[],"timestamp":"1731026911174"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731026911174"}]},"ts":"1731026911174"} 2024-11-08T00:48:31,177 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-08T00:48:31,179 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=13, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestQuotaAdmin2 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-08T00:48:31,179 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin2","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731026911179"}]},"ts":"1731026911179"} 2024-11-08T00:48:31,181 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin2, state=ENABLING in hbase:meta 2024-11-08T00:48:31,181 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {c302dddf724e=0} racks are {/default-rack=0} 2024-11-08T00:48:31,182 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-08T00:48:31,182 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-08T00:48:31,183 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-08T00:48:31,183 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-08T00:48:31,183 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-08T00:48:31,183 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-08T00:48:31,183 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-08T00:48:31,183 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin2, region=5722bdba1189d7abc2c73c96dd29b2f1, ASSIGN}] 2024-11-08T00:48:31,184 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin2, region=5722bdba1189d7abc2c73c96dd29b2f1, ASSIGN 2024-11-08T00:48:31,186 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=14, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin2, region=5722bdba1189d7abc2c73c96dd29b2f1, ASSIGN; state=OFFLINE, location=c302dddf724e,44401,1731026906683; forceNewPlan=false, retain=false 2024-11-08T00:48:31,336 INFO [c302dddf724e:46149 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-08T00:48:31,337 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=14 updating hbase:meta row=5722bdba1189d7abc2c73c96dd29b2f1, regionState=OPENING, regionLocation=c302dddf724e,44401,1731026906683 2024-11-08T00:48:31,340 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=14, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin2, region=5722bdba1189d7abc2c73c96dd29b2f1, ASSIGN because future has completed 2024-11-08T00:48:31,341 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5722bdba1189d7abc2c73c96dd29b2f1, server=c302dddf724e,44401,1731026906683}] 2024-11-08T00:48:31,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-08T00:48:31,499 INFO [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] handler.AssignRegionHandler(132): Open TestQuotaAdmin2,,1731026911131.5722bdba1189d7abc2c73c96dd29b2f1. 2024-11-08T00:48:31,499 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7752): Opening region: {ENCODED => 5722bdba1189d7abc2c73c96dd29b2f1, NAME => 'TestQuotaAdmin2,,1731026911131.5722bdba1189d7abc2c73c96dd29b2f1.', STARTKEY => '', ENDKEY => ''} 2024-11-08T00:48:31,499 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestQuotaAdmin2 5722bdba1189d7abc2c73c96dd29b2f1 2024-11-08T00:48:31,499 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(898): Instantiated TestQuotaAdmin2,,1731026911131.5722bdba1189d7abc2c73c96dd29b2f1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:48:31,500 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7794): checking encryption for 5722bdba1189d7abc2c73c96dd29b2f1 2024-11-08T00:48:31,500 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7797): checking classloading for 5722bdba1189d7abc2c73c96dd29b2f1 2024-11-08T00:48:31,502 INFO [StoreOpener-5722bdba1189d7abc2c73c96dd29b2f1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5722bdba1189d7abc2c73c96dd29b2f1 2024-11-08T00:48:31,504 INFO [StoreOpener-5722bdba1189d7abc2c73c96dd29b2f1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5722bdba1189d7abc2c73c96dd29b2f1 columnFamilyName cf 2024-11-08T00:48:31,504 DEBUG [StoreOpener-5722bdba1189d7abc2c73c96dd29b2f1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:48:31,505 INFO [StoreOpener-5722bdba1189d7abc2c73c96dd29b2f1-1 {}] regionserver.HStore(327): Store=5722bdba1189d7abc2c73c96dd29b2f1/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:48:31,505 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1038): replaying wal for 5722bdba1189d7abc2c73c96dd29b2f1 2024-11-08T00:48:31,506 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/default/TestQuotaAdmin2/5722bdba1189d7abc2c73c96dd29b2f1 2024-11-08T00:48:31,506 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/default/TestQuotaAdmin2/5722bdba1189d7abc2c73c96dd29b2f1 2024-11-08T00:48:31,507 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1048): stopping wal replay for 5722bdba1189d7abc2c73c96dd29b2f1 2024-11-08T00:48:31,507 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1060): Cleaning up temporary data for 5722bdba1189d7abc2c73c96dd29b2f1 2024-11-08T00:48:31,509 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1093): writing seq id for 5722bdba1189d7abc2c73c96dd29b2f1 2024-11-08T00:48:31,512 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/default/TestQuotaAdmin2/5722bdba1189d7abc2c73c96dd29b2f1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T00:48:31,513 INFO [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1114): Opened 5722bdba1189d7abc2c73c96dd29b2f1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72351353, jitterRate=0.0781191736459732}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-08T00:48:31,513 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5722bdba1189d7abc2c73c96dd29b2f1 2024-11-08T00:48:31,514 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1006): Region open journal for 5722bdba1189d7abc2c73c96dd29b2f1: Running coprocessor pre-open hook at 1731026911500Writing region info on filesystem at 1731026911500Initializing all the Stores at 1731026911501 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 
'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026911501Cleaning up temporary data from old regions at 1731026911507 (+6 ms)Running coprocessor post-open hooks at 1731026911513 (+6 ms)Region opened successfully at 1731026911514 (+1 ms) 2024-11-08T00:48:31,515 INFO [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegionServer(2236): Post open deploy tasks for TestQuotaAdmin2,,1731026911131.5722bdba1189d7abc2c73c96dd29b2f1., pid=15, masterSystemTime=1731026911493 2024-11-08T00:48:31,518 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegionServer(2266): Finished post open deploy task for TestQuotaAdmin2,,1731026911131.5722bdba1189d7abc2c73c96dd29b2f1. 2024-11-08T00:48:31,518 INFO [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] handler.AssignRegionHandler(153): Opened TestQuotaAdmin2,,1731026911131.5722bdba1189d7abc2c73c96dd29b2f1. 2024-11-08T00:48:31,519 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=14 updating hbase:meta row=5722bdba1189d7abc2c73c96dd29b2f1, regionState=OPEN, openSeqNum=2, regionLocation=c302dddf724e,44401,1731026906683 2024-11-08T00:48:31,521 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=15, ppid=14, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5722bdba1189d7abc2c73c96dd29b2f1, server=c302dddf724e,44401,1731026906683 because future has completed 2024-11-08T00:48:31,526 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=15, resume processing ppid=14 2024-11-08T00:48:31,526 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, ppid=14, state=SUCCESS, hasLock=false; OpenRegionProcedure 5722bdba1189d7abc2c73c96dd29b2f1, server=c302dddf724e,44401,1731026906683 in 183 msec 2024-11-08T00:48:31,529 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-08T00:48:31,529 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin2, region=5722bdba1189d7abc2c73c96dd29b2f1, ASSIGN in 343 msec 2024-11-08T00:48:31,530 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=13, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestQuotaAdmin2 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-08T00:48:31,531 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin2","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731026911531"}]},"ts":"1731026911531"} 2024-11-08T00:48:31,533 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin2, state=ENABLED in hbase:meta 2024-11-08T00:48:31,534 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=13, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestQuotaAdmin2 execute state=CREATE_TABLE_POST_OPERATION 2024-11-08T00:48:31,536 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestQuotaAdmin2 in 402 msec 
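After the CREATE completes, the test utility in the following lines waits until every region of the new table is assigned. Outside the test harness, a plain client can do a similar, cruder check with Admin#isTableAvailable, which only returns true once all regions are online. A minimal sketch, assuming the `admin` handle from the earlier sketch is still open; the 60s deadline mirrors the 60000ms timeout in the log, while the 250ms polling interval is an arbitrary choice.

import org.apache.hadoop.hbase.TableName;

// Reuses `admin` from the earlier sketch; polls until all regions of the table are online.
TableName table = TableName.valueOf("TestQuotaAdmin2");
long deadline = System.currentTimeMillis() + 60_000L;
while (!admin.isTableAvailable(table)) {
  if (System.currentTimeMillis() > deadline) {
    throw new IllegalStateException("Regions of " + table + " not assigned within 60s");
  }
  Thread.sleep(250);
}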
2024-11-08T00:48:31,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-08T00:48:31,908 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestQuotaAdmin2 completed 2024-11-08T00:48:31,908 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestQuotaAdmin2 get assigned. Timeout = 60000ms 2024-11-08T00:48:31,909 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T00:48:31,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestQuotaAdmin2 assigned to meta. Checking AM states. 2024-11-08T00:48:31,915 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T00:48:31,915 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestQuotaAdmin2 assigned. 2024-11-08T00:48:31,915 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T00:48:31,918 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestQuotaAdmin2,, stopping at row=TestQuotaAdmin2 ,, for max=2147483647 with caching=100 2024-11-08T00:48:31,923 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestQuotaAdmin2,, stopping at row=TestQuotaAdmin2 ,, for max=2147483647 with caching=100 2024-11-08T00:48:31,933 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46149 {}] master.HMaster$18(3529): Client=jenkins//172.17.0.2 creating {NAME => 'TestNs'} 2024-11-08T00:48:31,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46149 {}] procedure2.ProcedureExecutor(1139): Stored pid=16, state=RUNNABLE:CREATE_NAMESPACE_PREPARE, hasLock=false; CreateNamespaceProcedure, namespace=TestNs 2024-11-08T00:48:31,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=16 2024-11-08T00:48:31,951 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, state=SUCCESS, hasLock=false; CreateNamespaceProcedure, namespace=TestNs in 15 msec 2024-11-08T00:48:32,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=16 2024-11-08T00:48:32,198 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$NamespaceProcedureBiConsumer(2745): Operation: CREATE_NAMESPACE, Namespace: TestNs completed 2024-11-08T00:48:32,199 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46149 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestNs:TestTable', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-08T00:48:32,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46149 {}] procedure2.ProcedureExecutor(1139): Stored pid=17, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestNs:TestTable 2024-11-08T00:48:32,202 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=17, 
state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestNs:TestTable execute state=CREATE_TABLE_PRE_OPERATION 2024-11-08T00:48:32,202 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:48:32,202 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46149 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "TestNs" qualifier: "TestTable" procId is: 17 2024-11-08T00:48:32,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=17 2024-11-08T00:48:32,204 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=17, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestNs:TestTable execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-08T00:48:32,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33807 is added to blk_1073741844_1020 (size=358) 2024-11-08T00:48:32,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44001 is added to blk_1073741844_1020 (size=358) 2024-11-08T00:48:32,216 INFO [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 9658ac49e4a052fe7d43286a103ecab9, NAME => 'TestNs:TestTable,,1731026912199.9658ac49e4a052fe7d43286a103ecab9.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='TestNs:TestTable', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4 2024-11-08T00:48:32,217 INFO [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 822a2b407be6bb38301bc44c8cd44212, NAME => 'TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='TestNs:TestTable', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4 2024-11-08T00:48:32,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44001 is added to blk_1073741845_1021 (size=44) 2024-11-08T00:48:32,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33807 is added to blk_1073741845_1021 (size=44) 2024-11-08T00:48:32,232 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(898): Instantiated TestNs:TestTable,,1731026912199.9658ac49e4a052fe7d43286a103ecab9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; 
preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:48:32,232 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1722): Closing 9658ac49e4a052fe7d43286a103ecab9, disabling compactions & flushes 2024-11-08T00:48:32,232 INFO [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1755): Closing region TestNs:TestTable,,1731026912199.9658ac49e4a052fe7d43286a103ecab9. 2024-11-08T00:48:32,232 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestNs:TestTable,,1731026912199.9658ac49e4a052fe7d43286a103ecab9. 2024-11-08T00:48:32,232 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestNs:TestTable,,1731026912199.9658ac49e4a052fe7d43286a103ecab9. after waiting 0 ms 2024-11-08T00:48:32,232 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestNs:TestTable,,1731026912199.9658ac49e4a052fe7d43286a103ecab9. 2024-11-08T00:48:32,232 INFO [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1973): Closed TestNs:TestTable,,1731026912199.9658ac49e4a052fe7d43286a103ecab9. 2024-11-08T00:48:32,232 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1676): Region close journal for 9658ac49e4a052fe7d43286a103ecab9: Waiting for close lock at 1731026912232Disabling compacts and flushes for region at 1731026912232Disabling writes for close at 1731026912232Writing region close event to WAL at 1731026912232Closed at 1731026912232 2024-11-08T00:48:32,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44001 is added to blk_1073741846_1022 (size=44) 2024-11-08T00:48:32,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33807 is added to blk_1073741846_1022 (size=44) 2024-11-08T00:48:32,237 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(898): Instantiated TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:48:32,238 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1722): Closing 822a2b407be6bb38301bc44c8cd44212, disabling compactions & flushes 2024-11-08T00:48:32,238 INFO [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1755): Closing region TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212. 2024-11-08T00:48:32,238 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212. 2024-11-08T00:48:32,238 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212. after waiting 0 ms 2024-11-08T00:48:32,238 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212. 2024-11-08T00:48:32,238 INFO [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1973): Closed TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212. 
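The pid=16 and pid=17 requests above create a namespace and then a table inside it that is pre-split into two regions (STARTKEY '' to '1', and '1' to ''). For illustration only, the equivalent client calls could look like the following; the snippet again reuses the `admin` handle from the first sketch, and the BloomType setting reflects the BLOOMFILTER => 'ROW' attribute in the logged descriptor.

import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// 1. Create the namespace (CreateNamespaceProcedure, pid=16 above).
admin.createNamespace(NamespaceDescriptor.create("TestNs").build());

// 2. Create the table with one explicit split key, yielding the two regions seen in the log
//    (STARTKEY '' -> '1' and '1' -> '').
TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("TestNs", "TestTable"))
    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
        .setMaxVersions(1)
        .setBloomFilterType(BloomType.ROW)
        .build())
    .build();
admin.createTable(td, new byte[][] { Bytes.toBytes("1") });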
2024-11-08T00:48:32,238 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1676): Region close journal for 822a2b407be6bb38301bc44c8cd44212: Waiting for close lock at 1731026912238Disabling compacts and flushes for region at 1731026912238Disabling writes for close at 1731026912238Writing region close event to WAL at 1731026912238Closed at 1731026912238 2024-11-08T00:48:32,240 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=17, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestNs:TestTable execute state=CREATE_TABLE_ADD_TO_META 2024-11-08T00:48:32,240 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestNs:TestTable,,1731026912199.9658ac49e4a052fe7d43286a103ecab9.","families":{"info":[{"qualifier":"regioninfo","vlen":43,"tag":[],"timestamp":"1731026912240"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731026912240"}]},"ts":"1731026912240"} 2024-11-08T00:48:32,240 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212.","families":{"info":[{"qualifier":"regioninfo","vlen":43,"tag":[],"timestamp":"1731026912240"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731026912240"}]},"ts":"1731026912240"} 2024-11-08T00:48:32,246 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-08T00:48:32,248 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=17, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestNs:TestTable execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-08T00:48:32,248 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestNs:TestTable","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731026912248"}]},"ts":"1731026912248"} 2024-11-08T00:48:32,250 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestNs:TestTable, state=ENABLING in hbase:meta 2024-11-08T00:48:32,250 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {c302dddf724e=0} racks are {/default-rack=0} 2024-11-08T00:48:32,252 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-08T00:48:32,252 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-08T00:48:32,252 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-08T00:48:32,252 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-08T00:48:32,252 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-08T00:48:32,252 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-08T00:48:32,252 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-08T00:48:32,252 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=18, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=9658ac49e4a052fe7d43286a103ecab9, ASSIGN}, {pid=19, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=822a2b407be6bb38301bc44c8cd44212, ASSIGN}] 2024-11-08T00:48:32,254 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=19, ppid=17, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=822a2b407be6bb38301bc44c8cd44212, ASSIGN 2024-11-08T00:48:32,254 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=18, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=9658ac49e4a052fe7d43286a103ecab9, ASSIGN 2024-11-08T00:48:32,255 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=19, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestNs:TestTable, region=822a2b407be6bb38301bc44c8cd44212, ASSIGN; state=OFFLINE, location=c302dddf724e,36935,1731026906786; forceNewPlan=false, retain=false 2024-11-08T00:48:32,255 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=18, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestNs:TestTable, region=9658ac49e4a052fe7d43286a103ecab9, ASSIGN; state=OFFLINE, location=c302dddf724e,44401,1731026906683; forceNewPlan=false, retain=false 2024-11-08T00:48:32,406 INFO [c302dddf724e:46149 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-08T00:48:32,406 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=18 updating hbase:meta row=9658ac49e4a052fe7d43286a103ecab9, regionState=OPENING, regionLocation=c302dddf724e,44401,1731026906683 2024-11-08T00:48:32,406 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=822a2b407be6bb38301bc44c8cd44212, regionState=OPENING, regionLocation=c302dddf724e,36935,1731026906786 2024-11-08T00:48:32,410 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=18, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestNs:TestTable, region=9658ac49e4a052fe7d43286a103ecab9, ASSIGN because future has completed 2024-11-08T00:48:32,410 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=18, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9658ac49e4a052fe7d43286a103ecab9, server=c302dddf724e,44401,1731026906683}] 2024-11-08T00:48:32,411 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=19, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestNs:TestTable, region=822a2b407be6bb38301bc44c8cd44212, ASSIGN because future has completed 2024-11-08T00:48:32,412 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=21, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 822a2b407be6bb38301bc44c8cd44212, server=c302dddf724e,36935,1731026906786}] 2024-11-08T00:48:32,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=17 2024-11-08T00:48:32,569 INFO [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(132): Open TestNs:TestTable,,1731026912199.9658ac49e4a052fe7d43286a103ecab9. 
2024-11-08T00:48:32,569 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7752): Opening region: {ENCODED => 9658ac49e4a052fe7d43286a103ecab9, NAME => 'TestNs:TestTable,,1731026912199.9658ac49e4a052fe7d43286a103ecab9.', STARTKEY => '', ENDKEY => '1'} 2024-11-08T00:48:32,570 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestTable 9658ac49e4a052fe7d43286a103ecab9 2024-11-08T00:48:32,570 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(898): Instantiated TestNs:TestTable,,1731026912199.9658ac49e4a052fe7d43286a103ecab9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:48:32,570 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7794): checking encryption for 9658ac49e4a052fe7d43286a103ecab9 2024-11-08T00:48:32,570 INFO [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] handler.AssignRegionHandler(132): Open TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212. 2024-11-08T00:48:32,570 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7797): checking classloading for 9658ac49e4a052fe7d43286a103ecab9 2024-11-08T00:48:32,570 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(7752): Opening region: {ENCODED => 822a2b407be6bb38301bc44c8cd44212, NAME => 'TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212.', STARTKEY => '1', ENDKEY => ''} 2024-11-08T00:48:32,571 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestTable 822a2b407be6bb38301bc44c8cd44212 2024-11-08T00:48:32,571 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(898): Instantiated TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:48:32,571 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(7794): checking encryption for 822a2b407be6bb38301bc44c8cd44212 2024-11-08T00:48:32,571 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(7797): checking classloading for 822a2b407be6bb38301bc44c8cd44212 2024-11-08T00:48:32,572 INFO [StoreOpener-9658ac49e4a052fe7d43286a103ecab9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 9658ac49e4a052fe7d43286a103ecab9 2024-11-08T00:48:32,573 INFO [StoreOpener-822a2b407be6bb38301bc44c8cd44212-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 
822a2b407be6bb38301bc44c8cd44212 2024-11-08T00:48:32,574 INFO [StoreOpener-9658ac49e4a052fe7d43286a103ecab9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9658ac49e4a052fe7d43286a103ecab9 columnFamilyName cf 2024-11-08T00:48:32,574 DEBUG [StoreOpener-9658ac49e4a052fe7d43286a103ecab9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:48:32,574 INFO [StoreOpener-822a2b407be6bb38301bc44c8cd44212-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 822a2b407be6bb38301bc44c8cd44212 columnFamilyName cf 2024-11-08T00:48:32,574 DEBUG [StoreOpener-822a2b407be6bb38301bc44c8cd44212-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:48:32,575 INFO [StoreOpener-9658ac49e4a052fe7d43286a103ecab9-1 {}] regionserver.HStore(327): Store=9658ac49e4a052fe7d43286a103ecab9/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:48:32,575 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1038): replaying wal for 9658ac49e4a052fe7d43286a103ecab9 2024-11-08T00:48:32,575 INFO [StoreOpener-822a2b407be6bb38301bc44c8cd44212-1 {}] regionserver.HStore(327): Store=822a2b407be6bb38301bc44c8cd44212/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:48:32,575 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1038): replaying wal for 822a2b407be6bb38301bc44c8cd44212 2024-11-08T00:48:32,576 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/TestNs/TestTable/9658ac49e4a052fe7d43286a103ecab9 2024-11-08T00:48:32,576 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(5546): Found 0 recovered 
edits file(s) under hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/TestNs/TestTable/822a2b407be6bb38301bc44c8cd44212 2024-11-08T00:48:32,577 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/TestNs/TestTable/9658ac49e4a052fe7d43286a103ecab9 2024-11-08T00:48:32,577 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/TestNs/TestTable/822a2b407be6bb38301bc44c8cd44212 2024-11-08T00:48:32,577 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1048): stopping wal replay for 9658ac49e4a052fe7d43286a103ecab9 2024-11-08T00:48:32,577 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1060): Cleaning up temporary data for 9658ac49e4a052fe7d43286a103ecab9 2024-11-08T00:48:32,578 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1048): stopping wal replay for 822a2b407be6bb38301bc44c8cd44212 2024-11-08T00:48:32,578 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1060): Cleaning up temporary data for 822a2b407be6bb38301bc44c8cd44212 2024-11-08T00:48:32,580 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1093): writing seq id for 9658ac49e4a052fe7d43286a103ecab9 2024-11-08T00:48:32,581 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1093): writing seq id for 822a2b407be6bb38301bc44c8cd44212 2024-11-08T00:48:32,583 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/TestNs/TestTable/9658ac49e4a052fe7d43286a103ecab9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T00:48:32,583 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/TestNs/TestTable/822a2b407be6bb38301bc44c8cd44212/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T00:48:32,584 INFO [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1114): Opened 9658ac49e4a052fe7d43286a103ecab9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63129425, jitterRate=-0.05929826200008392}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-08T00:48:32,584 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9658ac49e4a052fe7d43286a103ecab9 2024-11-08T00:48:32,584 INFO [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1114): Opened 822a2b407be6bb38301bc44c8cd44212; next sequenceid=2; 
SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66026746, jitterRate=-0.016124814748764038}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-08T00:48:32,584 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 822a2b407be6bb38301bc44c8cd44212 2024-11-08T00:48:32,585 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1006): Region open journal for 822a2b407be6bb38301bc44c8cd44212: Running coprocessor pre-open hook at 1731026912571Writing region info on filesystem at 1731026912571Initializing all the Stores at 1731026912572 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026912572Cleaning up temporary data from old regions at 1731026912578 (+6 ms)Running coprocessor post-open hooks at 1731026912584 (+6 ms)Region opened successfully at 1731026912585 (+1 ms) 2024-11-08T00:48:32,585 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1006): Region open journal for 9658ac49e4a052fe7d43286a103ecab9: Running coprocessor pre-open hook at 1731026912570Writing region info on filesystem at 1731026912570Initializing all the Stores at 1731026912571 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026912572 (+1 ms)Cleaning up temporary data from old regions at 1731026912577 (+5 ms)Running coprocessor post-open hooks at 1731026912584 (+7 ms)Region opened successfully at 1731026912585 (+1 ms) 2024-11-08T00:48:32,586 INFO [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegionServer(2236): Post open deploy tasks for TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212., pid=21, masterSystemTime=1731026912566 2024-11-08T00:48:32,586 INFO [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2236): Post open deploy tasks for TestNs:TestTable,,1731026912199.9658ac49e4a052fe7d43286a103ecab9., pid=20, masterSystemTime=1731026912564 2024-11-08T00:48:32,589 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2266): Finished post open deploy task for TestNs:TestTable,,1731026912199.9658ac49e4a052fe7d43286a103ecab9. 2024-11-08T00:48:32,589 INFO [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(153): Opened TestNs:TestTable,,1731026912199.9658ac49e4a052fe7d43286a103ecab9. 
2024-11-08T00:48:32,590 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=18 updating hbase:meta row=9658ac49e4a052fe7d43286a103ecab9, regionState=OPEN, openSeqNum=2, regionLocation=c302dddf724e,44401,1731026906683 2024-11-08T00:48:32,591 DEBUG [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegionServer(2266): Finished post open deploy task for TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212. 2024-11-08T00:48:32,591 INFO [RS_OPEN_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] handler.AssignRegionHandler(153): Opened TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212. 2024-11-08T00:48:32,591 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=822a2b407be6bb38301bc44c8cd44212, regionState=OPEN, openSeqNum=2, regionLocation=c302dddf724e,36935,1731026906786 2024-11-08T00:48:32,593 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=18, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9658ac49e4a052fe7d43286a103ecab9, server=c302dddf724e,44401,1731026906683 because future has completed 2024-11-08T00:48:32,594 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=21, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 822a2b407be6bb38301bc44c8cd44212, server=c302dddf724e,36935,1731026906786 because future has completed 2024-11-08T00:48:32,597 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=20, resume processing ppid=18 2024-11-08T00:48:32,597 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=18, state=SUCCESS, hasLock=false; OpenRegionProcedure 9658ac49e4a052fe7d43286a103ecab9, server=c302dddf724e,44401,1731026906683 in 184 msec 2024-11-08T00:48:32,599 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=21, resume processing ppid=19 2024-11-08T00:48:32,599 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, ppid=19, state=SUCCESS, hasLock=false; OpenRegionProcedure 822a2b407be6bb38301bc44c8cd44212, server=c302dddf724e,36935,1731026906786 in 184 msec 2024-11-08T00:48:32,600 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, ppid=17, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=9658ac49e4a052fe7d43286a103ecab9, ASSIGN in 345 msec 2024-11-08T00:48:32,602 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=17 2024-11-08T00:48:32,602 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=17, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=822a2b407be6bb38301bc44c8cd44212, ASSIGN in 347 msec 2024-11-08T00:48:32,603 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=17, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestNs:TestTable execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-08T00:48:32,603 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestNs:TestTable","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731026912603"}]},"ts":"1731026912603"} 2024-11-08T00:48:32,605 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestNs:TestTable, state=ENABLED in 
hbase:meta 2024-11-08T00:48:32,606 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=17, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestNs:TestTable execute state=CREATE_TABLE_POST_OPERATION 2024-11-08T00:48:32,609 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestNs:TestTable in 407 msec 2024-11-08T00:48:32,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=17 2024-11-08T00:48:32,977 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: TestNs:TestTable completed 2024-11-08T00:48:32,977 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestNs:TestTable get assigned. Timeout = 60000ms 2024-11-08T00:48:32,977 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T00:48:32,982 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestNs:TestTable assigned to meta. Checking AM states. 2024-11-08T00:48:32,982 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T00:48:32,983 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestNs:TestTable assigned. 2024-11-08T00:48:32,983 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T00:48:32,985 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestNs:TestTable,, stopping at row=TestNs:TestTable ,, for max=2147483647 with caching=100 2024-11-08T00:48:32,989 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestNs:TestTable,, stopping at row=TestNs:TestTable ,, for max=2147483647 with caching=100 2024-11-08T00:48:33,017 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: quotas.TestClusterScopeQuotaThrottle#testUserTableClusterScopeQuota Thread=304, OpenFileDescriptor=543, MaxFileDescriptor=1048576, SystemLoadAverage=681, ProcessCount=11, AvailableMemoryMB=2560 2024-11-08T00:48:33,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:quota', row='u.jenkins', locateType=CURRENT is [region=hbase:quota,,1731026908923.2f57c4e8f93fd2814d7a8ce1d4ced5a1., hostname=c302dddf724e,44401,1731026906683, seqNum=2] 2024-11-08T00:48:33,297 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T00:48:33,298 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-08T00:48:33,298 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731030513046 bypass), TestNs=QuotaState(ts=1731030513046 bypass)} 2024-11-08T00:48:33,298 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1731030513046 bypass), TestQuotaAdmin2=QuotaState(ts=1731030513046 bypass), TestQuotaAdmin1=QuotaState(ts=1731030513046 bypass)} 2024-11-08T00:48:33,298 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731030513046 [ TestNs:TestTable ])} 2024-11-08T00:48:33,298 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): 
{all=QuotaState(ts=1731030513046 bypass)} 2024-11-08T00:48:33,549 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T00:48:33,549 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-08T00:48:33,549 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731030513046 bypass), TestNs=QuotaState(ts=1731030513046 bypass)} 2024-11-08T00:48:33,549 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1731030513046 bypass), TestNs:TestTable=QuotaState(ts=1731030513046 bypass)} 2024-11-08T00:48:33,549 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731030513046 [ TestNs:TestTable ])} 2024-11-08T00:48:33,549 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731030513046 bypass)} 2024-11-08T00:48:33,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36935 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestNs:TestTable numWrites=0 numReads=1: number of read requests exceeded - wait 6mins, 0ms 2024-11-08T00:48:33,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36935 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Get size: 118 connection: 172.17.0.2:34464 deadline: 1731026923564, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms 2024-11-08T00:48:33,591 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212., hostname=c302dddf724e,36935,1731026906786, seqNum=-1 , the old value is region=TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212., hostname=c302dddf724e,36935,1731026906786, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-08T00:48:33,592 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212., 
hostname=c302dddf724e,36935,1731026906786, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-08T00:48:33,592 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212., hostname=c302dddf724e,36935,1731026906786, seqNum=-1 because the exception is null or not the one we care about 2024-11-08T00:48:33,592 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 360000000000ns which would exceed the timeout. We should throw instead. org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] 
at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-08T00:48:33,593 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=10 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-11-08T00:48:33.592Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testUserTableClusterScopeQuota(TestClusterScopeQuotaThrottle.java:224) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-08T00:48:33,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36935 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestNs:TestTable numWrites=0 numReads=1: number of read requests exceeded - wait 6mins, 0ms 2024-11-08T00:48:33,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36935 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Get size: 117 connection: 172.17.0.2:34464 deadline: 1731026923595, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms 2024-11-08T00:48:33,597 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212., hostname=c302dddf724e,36935,1731026906786, seqNum=-1 , the old value is region=TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212., hostname=c302dddf724e,36935,1731026906786, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-08T00:48:33,597 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212., hostname=c302dddf724e,36935,1731026906786, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-08T00:48:33,597 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212., hostname=c302dddf724e,36935,1731026906786, seqNum=-1 because the exception is null or not the one we care about 2024-11-08T00:48:33,597 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 360000000000ns which would exceed the timeout. We should throw instead. 
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-08T00:48:33,598 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=0 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-11-08T00:48:33.597Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testUserTableClusterScopeQuota(TestClusterScopeQuotaThrottle.java:224) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more
2024-11-08T00:48:33,859 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-11-08T00:48:33,859 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache
2024-11-08T00:48:33,859 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731034113046 bypass), TestNs=QuotaState(ts=1731034113046 bypass)}
2024-11-08T00:48:33,860 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1731034113046 bypass), TestQuotaAdmin2=QuotaState(ts=1731034113046 bypass), TestQuotaAdmin1=QuotaState(ts=1731034113046 bypass)}
2024-11-08T00:48:33,860 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731034113046 bypass)}
2024-11-08T00:48:33,860 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731034113046 bypass)}
2024-11-08T00:48:34,110 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-11-08T00:48:34,110 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache
2024-11-08T00:48:34,111 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731034113046 bypass), TestNs=QuotaState(ts=1731034113046 bypass)}
2024-11-08T00:48:34,111 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1731034113046 bypass), TestNs:TestTable=QuotaState(ts=1731034113046 bypass)}
2024-11-08T00:48:34,111 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731034113046 bypass)}
2024-11-08T00:48:34,111 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731034113046 bypass)}
2024-11-08T00:48:34,127 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: quotas.TestClusterScopeQuotaThrottle#testUserTableClusterScopeQuota Thread=303 (was 304), OpenFileDescriptor=543 (was 543), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=681 (was 681), ProcessCount=11 (was 11), AvailableMemoryMB=2483 (was 2560)
2024-11-08T00:48:34,140 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: quotas.TestClusterScopeQuotaThrottle#testUserNamespaceClusterScopeQuota Thread=303, OpenFileDescriptor=543, MaxFileDescriptor=1048576, SystemLoadAverage=681, ProcessCount=11, AvailableMemoryMB=2481
2024-11-08T00:48:34,206 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties
2024-11-08T00:48:34,255 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-11-08T00:48:34,257 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestNs:TestTable'
2024-11-08T00:48:34,257 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestQuotaAdmin2'
2024-11-08T00:48:34,258 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestQuotaAdmin1'
2024-11-08T00:48:34,258 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:quota'
2024-11-08T00:48:34,259 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestQuotaAdmin0'
2024-11-08T00:48:34,405 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-11-08T00:48:34,405 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true
2024-11-08T00:48:34,656 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache
2024-11-08T00:48:34,656 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731037713146 bypass), TestNs=QuotaState(ts=1731037713146 bypass)}
2024-11-08T00:48:34,656 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1731037713146 bypass), TestQuotaAdmin2=QuotaState(ts=1731037713146 bypass), TestQuotaAdmin1=QuotaState(ts=1731037713146 bypass)}
2024-11-08T00:48:34,656 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731037713146 [ default ])}
2024-11-08T00:48:34,656 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731037713146 bypass)}
2024-11-08T00:48:34,906 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-11-08T00:48:34,907 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true
2024-11-08T00:48:35,157 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache
2024-11-08T00:48:35,157 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731037713246 bypass), TestNs=QuotaState(ts=1731037713246 bypass)}
2024-11-08T00:48:35,157 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1731037713246 bypass), TestNs:TestTable=QuotaState(ts=1731037713246 bypass)}
2024-11-08T00:48:35,157 DEBUG [Time-limited test {}]
quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731037713246 [ default ])} 2024-11-08T00:48:35,157 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731037713246 bypass)} 2024-11-08T00:48:35,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36935 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestQuotaAdmin0 numWrites=0 numReads=1: number of read requests exceeded - wait 12sec, 0ms 2024-11-08T00:48:35,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36935 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Get size: 115 connection: 172.17.0.2:34464 deadline: 1731026925165, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms 2024-11-08T00:48:35,167 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f., hostname=c302dddf724e,36935,1731026906786, seqNum=-1 , the old value is region=TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f., hostname=c302dddf724e,36935,1731026906786, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-08T00:48:35,167 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f., hostname=c302dddf724e,36935,1731026906786, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at 
org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-08T00:48:35,167 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f., hostname=c302dddf724e,36935,1731026906786, seqNum=-1 because the exception is null or not the one we care about 2024-11-08T00:48:35,167 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 12000000000ns which would exceed the timeout. We should throw instead. org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-08T00:48:35,168 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=5 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-11-08T00:48:35.167Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testUserNamespaceClusterScopeQuota(TestClusterScopeQuotaThrottle.java:199) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more
2024-11-08T00:48:35,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36935 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestQuotaAdmin0 numWrites=1 numReads=0: number of write requests exceeded - wait 10sec, 0ms
2024-11-08T00:48:35,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36935 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 143 connection: 172.17.0.2:34464 deadline: 1731026925185, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms
2024-11-08T00:48:35,187 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f., hostname=c302dddf724e,36935,1731026906786, seqNum=-1 , the old value is region=TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f., hostname=c302dddf724e,36935,1731026906786, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-08T00:48:35,187 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f., hostname=c302dddf724e,36935,1731026906786, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-08T00:48:35,188 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f., hostname=c302dddf724e,36935,1731026906786, seqNum=-1 because the exception is null or not the one we care about
2024-11-08T00:48:35,188 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 10000000000ns which would exceed the timeout. We should throw instead.
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-08T00:48:35,189 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(86): put failed after nRetries=6 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-11-08T00:48:35.188Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.put(TableOverAsyncTable.java:213) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doPuts(ThrottleQuotaTestUtil.java:81) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doPuts(ThrottleQuotaTestUtil.java:64) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testUserNamespaceClusterScopeQuota(TestClusterScopeQuotaThrottle.java:200) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms
  at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133)
  at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104)
  at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163)
  at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121)
  at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97)
  at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243)
  at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191)
  at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952)
  at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
  at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
  at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
  at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
  at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
  at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
  ... 32 more
2024-11-08T00:48:35,446 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-11-08T00:48:35,446 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache
2024-11-08T00:48:35,446 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731041313246 bypass), TestNs=QuotaState(ts=1731041313246 bypass)}
2024-11-08T00:48:35,446 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1731041313246 bypass), TestQuotaAdmin2=QuotaState(ts=1731041313246 bypass), TestQuotaAdmin1=QuotaState(ts=1731041313246 bypass)}
2024-11-08T00:48:35,446 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731041313246 bypass)}
2024-11-08T00:48:35,446 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731041313246 bypass)}
2024-11-08T00:48:35,697 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-11-08T00:48:35,697 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache
2024-11-08T00:48:35,697 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731041313246 bypass), TestNs=QuotaState(ts=1731041313246 bypass)}
2024-11-08T00:48:35,697 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1731041313246 bypass), TestNs:TestTable=QuotaState(ts=1731041313246 bypass)}
2024-11-08T00:48:35,697 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731041313246 bypass)}
2024-11-08T00:48:35,697 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731041313246 bypass)}
2024-11-08T00:48:35,708 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: quotas.TestClusterScopeQuotaThrottle#testUserNamespaceClusterScopeQuota Thread=303 (was 303), OpenFileDescriptor=542 (was 543), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=681 (was 681), ProcessCount=11 (was 11), AvailableMemoryMB=2442 (was 2481)
2024-11-08T00:48:35,718 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: quotas.TestClusterScopeQuotaThrottle#testUserClusterScopeQuota Thread=303, OpenFileDescriptor=542, MaxFileDescriptor=1048576, SystemLoadAverage=681, ProcessCount=11, AvailableMemoryMB=2442
2024-11-08T00:48:35,983 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-11-08T00:48:35,983 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true
2024-11-08T00:48:36,233 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache
2024-11-08T00:48:36,234 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731044913346 bypass), TestNs=QuotaState(ts=1731044913346 bypass)}
2024-11-08T00:48:36,234 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1731044913346 bypass), TestQuotaAdmin2=QuotaState(ts=1731044913346 bypass), TestQuotaAdmin1=QuotaState(ts=1731044913346 bypass)}
2024-11-08T00:48:36,234 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731044913346 global-limiter)}
2024-11-08T00:48:36,234 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731044913346 bypass)}
2024-11-08T00:48:36,369 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-11-08T00:48:36,370 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-11-08T00:48:36,371 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestQuotaAdmin0
2024-11-08T00:48:36,371 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestQuotaAdmin0 Metrics about Tables on a single HBase RegionServer
2024-11-08T00:48:36,372 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestQuotaAdmin2
2024-11-08T00:48:36,372 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestQuotaAdmin2 Metrics about Tables on a single HBase RegionServer
2024-11-08T00:48:36,373 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_quota
2024-11-08T00:48:36,373 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_quota Metrics about Tables on a single HBase RegionServer
2024-11-08T00:48:36,374 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-08T00:48:36,374 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-08T00:48:36,374 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_TestNs_table_TestTable
2024-11-08T00:48:36,374 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_TestNs_table_TestTable Metrics about Tables on a single HBase RegionServer
2024-11-08T00:48:36,375 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.quotas.MasterQuotasObserver
2024-11-08T00:48:36,375 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.quotas.MasterQuotasObserver Metrics about HBase MasterObservers
2024-11-08T00:48:36,376 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-08T00:48:36,376 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-08T00:48:36,376 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestQuotaAdmin1
2024-11-08T00:48:36,376 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestQuotaAdmin1 Metrics about Tables on a single HBase RegionServer
2024-11-08T00:48:36,484 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-11-08T00:48:36,484 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true
2024-11-08T00:48:36,735 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache
2024-11-08T00:48:36,735 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731044913446 bypass), TestNs=QuotaState(ts=1731044913446 bypass)}
2024-11-08T00:48:36,735 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1731044913446 bypass), TestNs:TestTable=QuotaState(ts=1731044913446 bypass)}
2024-11-08T00:48:36,735 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731044913446 global-limiter)}
2024-11-08T00:48:36,735 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731044913446 bypass)}
2024-11-08T00:48:36,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36935 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestQuotaAdmin0 numWrites=1 numReads=0: number of write requests exceeded - wait 10sec, 0ms
2024-11-08T00:48:36,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36935 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 143 connection: 172.17.0.2:34464 deadline: 1731026926754,
exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms 2024-11-08T00:48:36,756 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f., hostname=c302dddf724e,36935,1731026906786, seqNum=-1 , the old value is region=TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f., hostname=c302dddf724e,36935,1731026906786, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-08T00:48:36,757 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f., hostname=c302dddf724e,36935,1731026906786, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-08T00:48:36,757 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f., hostname=c302dddf724e,36935,1731026906786, seqNum=-1 because the exception is null or not the one we care about 2024-11-08T00:48:36,757 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 10000000000ns which would exceed the timeout. We should throw instead. org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-08T00:48:36,758 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(86): put failed after nRetries=6 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-11-08T00:48:36.757Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.put(TableOverAsyncTable.java:213) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doPuts(ThrottleQuotaTestUtil.java:81) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doPuts(ThrottleQuotaTestUtil.java:64) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testUserClusterScopeQuota(TestClusterScopeQuotaThrottle.java:178) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-08T00:48:36,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36935 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestQuotaAdmin0 numWrites=0 numReads=1: number of read requests exceeded - wait 20sec, 0ms 2024-11-08T00:48:36,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36935 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Get size: 115 connection: 172.17.0.2:34464 deadline: 1731026926765, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 20sec, 0ms 2024-11-08T00:48:36,767 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f., hostname=c302dddf724e,36935,1731026906786, seqNum=-1 , the old value is region=TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f., hostname=c302dddf724e,36935,1731026906786, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 20sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-08T00:48:36,767 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f., hostname=c302dddf724e,36935,1731026906786, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 20sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-08T00:48:36,767 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f., hostname=c302dddf724e,36935,1731026906786, seqNum=-1 because the exception is null or not the one we care about 2024-11-08T00:48:36,768 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 20000000000ns which would exceed the timeout. We should throw instead. 
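The DEBUG entry just above from backoff.HBaseServerExceptionPauseManager records the client's fail-fast rule: when the wait suggested by an RpcThrottlingException (20 s, i.e. 20000000000 ns here) is longer than the time remaining before the operation deadline, the caller throws instead of sleeping; the stack trace it logged follows below. A minimal Java sketch of that decision rule, using hypothetical names rather than the actual HBase internals:

```java
import java.util.OptionalLong;
import java.util.concurrent.TimeUnit;

// Hypothetical sketch of the pause-or-throw decision seen in the log entry above:
// if the server-suggested throttling wait would overrun the remaining operation
// timeout, the caller fails fast instead of sleeping and retrying.
public final class ThrottlePauseSketch {

  /**
   * @param suggestedPauseNs wait advertised by the throttling exception (e.g. 20s = 2e10 ns)
   * @param remainingTimeoutNs time left before the overall operation deadline
   * @return the pause to sleep, or empty to signal "throw instead of retrying"
   */
  static OptionalLong pauseOrThrow(long suggestedPauseNs, long remainingTimeoutNs) {
    if (suggestedPauseNs > remainingTimeoutNs) {
      return OptionalLong.empty(); // pause exceeds the deadline: propagate the exception
    }
    return OptionalLong.of(suggestedPauseNs);
  }

  public static void main(String[] args) {
    long suggested = TimeUnit.SECONDS.toNanos(20); // "wait 20sec" from the log
    long remaining = TimeUnit.SECONDS.toNanos(10); // assumed remaining timeout, for illustration
    System.out.println(pauseOrThrow(suggested, remaining)); // OptionalLong.empty -> throw
  }
}
```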
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 20sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 20sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-08T00:48:36,768 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=3 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-11-08T00:48:36.767Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 20sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testUserClusterScopeQuota(TestClusterScopeQuotaThrottle.java:179) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 20sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 20sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more
2024-11-08T00:48:37,026 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-11-08T00:48:37,026 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache
2024-11-08T00:48:37,026 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731048513446 bypass), TestNs=QuotaState(ts=1731048513446 bypass)}
2024-11-08T00:48:37,026 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1731048513446 bypass), TestQuotaAdmin2=QuotaState(ts=1731048513446 bypass), TestQuotaAdmin1=QuotaState(ts=1731048513446 bypass)}
2024-11-08T00:48:37,026 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731048513446 bypass)}
2024-11-08T00:48:37,026 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731048513446 bypass)}
2024-11-08T00:48:37,277 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-11-08T00:48:37,277 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache
2024-11-08T00:48:37,277 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731048513446 bypass), TestNs=QuotaState(ts=1731048513446 bypass)}
2024-11-08T00:48:37,277 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1731048513446 bypass), TestNs:TestTable=QuotaState(ts=1731048513446 bypass)}
2024-11-08T00:48:37,277 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731048513446 bypass)}
2024-11-08T00:48:37,277 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731048513446 bypass)}
2024-11-08T00:48:37,287 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: quotas.TestClusterScopeQuotaThrottle#testUserClusterScopeQuota Thread=301 (was 303), OpenFileDescriptor=537 (was 542), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=658 (was 681), ProcessCount=11 (was 11), AvailableMemoryMB=2415 (was 2442)
2024-11-08T00:48:37,296 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: quotas.TestClusterScopeQuotaThrottle#testTableClusterScopeQuota Thread=301, OpenFileDescriptor=537, MaxFileDescriptor=1048576, SystemLoadAverage=658, ProcessCount=11, AvailableMemoryMB=2414
2024-11-08T00:48:37,554 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-11-08T00:48:37,554 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache
2024-11-08T00:48:37,555 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731052113446 bypass), TestNs=QuotaState(ts=1731052113446 bypass)}
2024-11-08T00:48:37,557 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1731052113446 TimeBasedLimiter( readReqs=AverageIntervalRateLimiter(avail=10 limit=10 tunit=3600000))), TestQuotaAdmin2=QuotaState(ts=1731052113446 bypass), TestQuotaAdmin1=QuotaState(ts=1731052113446 bypass)}
2024-11-08T00:48:37,557 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {}
2024-11-08T00:48:37,557 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731052113446 bypass)}
2024-11-08T00:48:37,807 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-11-08T00:48:37,808 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache
2024-11-08T00:48:37,808 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731052113446 bypass), TestNs=QuotaState(ts=1731052113446 bypass)}
2024-11-08T00:48:37,808 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1731052113446 bypass), TestNs:TestTable=QuotaState(ts=1731052113446 TimeBasedLimiter( readReqs=AverageIntervalRateLimiter(avail=10 limit=10 tunit=3600000)))}
2024-11-08T00:48:37,808 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {}
2024-11-08T00:48:37,808 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731052113446 bypass)}
2024-11-08T00:48:37,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36935 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestNs:TestTable numWrites=0 numReads=1: number of read requests exceeded - wait 6mins, 0ms
2024-11-08T00:48:37,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36935 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Get size: 118 connection: 172.17.0.2:34464 deadline: 1731026927825, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms
2024-11-08T00:48:37,827 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212., hostname=c302dddf724e,36935,1731026906786, seqNum=-1 , the old value is region=TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212., hostname=c302dddf724e,36935,1731026906786, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at
org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-08T00:48:37,828 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212., hostname=c302dddf724e,36935,1731026906786, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-08T00:48:37,828 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212., hostname=c302dddf724e,36935,1731026906786, seqNum=-1 because the exception is null or not the one we care about 2024-11-08T00:48:37,828 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 360000000000ns which would exceed the timeout. We should throw instead. 
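The 6-minute waits reported above (and in the stack trace that follows) come from the table-scope read throttle visible in the QuotaCache dump for TestNs:TestTable: readReqs=AverageIntervalRateLimiter(avail=10 limit=10 tunit=3600000), i.e. 10 read requests per hour. This log does not show how the test installed that quota; the sketch below only mirrors the limit reported in the cache, using the public Admin quota API, and does not reproduce the cluster-scope aspect that TestClusterScopeQuotaThrottle exercises.

```java
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
import org.apache.hadoop.hbase.quotas.ThrottleType;

// Sketch: install a table-level read-request throttle shaped like the one in the
// QuotaCache dump above (10 read requests per hour on TestNs:TestTable).
// Assumes a reachable cluster with quotas enabled (hbase.quota.enabled=true).
public final class SetReadThrottleSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestNs", "TestTable");
      // Reads beyond 10 per hour are rejected with RpcThrottlingException and a
      // suggested wait, as seen in the log entries above.
      admin.setQuota(
          QuotaSettingsFactory.throttleTable(table, ThrottleType.READ_NUMBER, 10, TimeUnit.HOURS));
      // To remove the throttle again:
      // admin.setQuota(QuotaSettingsFactory.unthrottleTable(table));
    }
  }
}
```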
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-08T00:48:37,828 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=10 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-11-08T00:48:37.828Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testTableClusterScopeQuota(TestClusterScopeQuotaThrottle.java:151) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-08T00:48:37,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36935 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestNs:TestTable numWrites=0 numReads=1: number of read requests exceeded - wait 6mins, 0ms 2024-11-08T00:48:37,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36935 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Get size: 117 connection: 172.17.0.2:34464 deadline: 1731026927829, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms 2024-11-08T00:48:37,831 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212., hostname=c302dddf724e,36935,1731026906786, seqNum=-1 , the old value is region=TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212., hostname=c302dddf724e,36935,1731026906786, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-08T00:48:37,831 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212., hostname=c302dddf724e,36935,1731026906786, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-08T00:48:37,831 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212., hostname=c302dddf724e,36935,1731026906786, seqNum=-1 because the exception is null or not the one we care about 2024-11-08T00:48:37,831 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 360000000000ns which would exceed the timeout. We should throw instead. 
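The DEBUG record above from backoff.HBaseServerExceptionPauseManager captures the client-side decision rule at this point in the run: the throttle suggests a 360000000000 ns (6 min) pause, but that pause would overshoot the operation's remaining timeout, so the caller throws the RpcThrottlingException instead of scheduling a retry. A minimal, self-contained Java sketch of that rule follows; the class and method names and the 10-second remaining budget are illustrative assumptions, not the actual HBase implementation.

import java.time.Duration;
import java.util.Optional;

// Simplified model of the pause-or-throw decision logged above: if the throttle's
// suggested wait exceeds the time left before the operation's deadline, give up and
// surface the RpcThrottlingException rather than sleeping. Illustrative only.
public class PauseOrThrowSketch {

    // Returns the pause to apply, or empty if the caller should throw immediately.
    static Optional<Duration> pauseForThrottling(Duration suggestedWait, Duration remainingUntilDeadline) {
        if (suggestedWait.compareTo(remainingUntilDeadline) > 0) {
            return Optional.empty(); // "would exceed the timeout. We should throw instead."
        }
        return Optional.of(suggestedWait);
    }

    public static void main(String[] args) {
        // The record above reports a suggested pause of 360000000000 ns (6 minutes);
        // the remaining budget here is an assumed value for illustration.
        Duration suggested = Duration.ofNanos(360_000_000_000L);
        Duration remaining = Duration.ofSeconds(10);
        System.out.println(pauseForThrottling(suggested, remaining)
            .map(d -> "retry after " + d)
            .orElse("throw RpcThrottlingException to the caller"));
    }
}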
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-08T00:48:37,832 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=0 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-11-08T00:48:37.831Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testTableClusterScopeQuota(TestClusterScopeQuotaThrottle.java:151) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-08T00:48:38,089 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T00:48:38,090 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-08T00:48:38,090 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731055713446 bypass), TestNs=QuotaState(ts=1731055713446 bypass)} 2024-11-08T00:48:38,090 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1731055713446 bypass), TestQuotaAdmin2=QuotaState(ts=1731055713446 bypass), TestQuotaAdmin1=QuotaState(ts=1731055713446 bypass)} 2024-11-08T00:48:38,090 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {} 2024-11-08T00:48:38,090 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731055713446 bypass)} 2024-11-08T00:48:38,340 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T00:48:38,340 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-08T00:48:38,340 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731055713446 bypass), TestNs=QuotaState(ts=1731055713446 bypass)} 2024-11-08T00:48:38,340 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1731055713446 bypass), TestNs:TestTable=QuotaState(ts=1731055713446 bypass)} 2024-11-08T00:48:38,340 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731055713446 bypass)} 2024-11-08T00:48:38,340 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731055713446 bypass)} 2024-11-08T00:48:38,350 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: quotas.TestClusterScopeQuotaThrottle#testTableClusterScopeQuota Thread=301 (was 301), OpenFileDescriptor=537 (was 537), MaxFileDescriptor=1048576 
(was 1048576), SystemLoadAverage=658 (was 658), ProcessCount=11 (was 11), AvailableMemoryMB=2416 (was 2414) - AvailableMemoryMB LEAK? - 2024-11-08T00:48:38,359 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: quotas.TestClusterScopeQuotaThrottle#testNamespaceClusterScopeQuota Thread=301, OpenFileDescriptor=537, MaxFileDescriptor=1048576, SystemLoadAverage=658, ProcessCount=11, AvailableMemoryMB=2416 2024-11-08T00:48:38,620 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T00:48:38,620 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-08T00:48:38,621 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731059313446 TimeBasedLimiter( writeReqs=AverageIntervalRateLimiter(avail=5 limit=5 tunit=60000) readReqs=AverageIntervalRateLimiter(avail=6 limit=6 tunit=60000))), TestNs=QuotaState(ts=1731059313446 bypass)} 2024-11-08T00:48:38,621 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1731059313446 bypass), TestQuotaAdmin2=QuotaState(ts=1731059313446 bypass), TestQuotaAdmin1=QuotaState(ts=1731059313446 bypass)} 2024-11-08T00:48:38,621 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {} 2024-11-08T00:48:38,621 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731059313446 bypass)} 2024-11-08T00:48:38,871 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T00:48:38,871 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-08T00:48:38,871 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731059313446 TimeBasedLimiter( writeReqs=AverageIntervalRateLimiter(avail=5 limit=5 tunit=60000) readReqs=AverageIntervalRateLimiter(avail=6 limit=6 tunit=60000))), TestNs=QuotaState(ts=1731059313446 bypass)} 2024-11-08T00:48:38,871 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1731059313446 bypass), TestNs:TestTable=QuotaState(ts=1731059313446 bypass)} 2024-11-08T00:48:38,871 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {} 2024-11-08T00:48:38,871 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731059313446 bypass)} 2024-11-08T00:48:38,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36935 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestQuotaAdmin0 numWrites=1 numReads=0: number of write requests exceeded - wait 12sec, 0ms 2024-11-08T00:48:38,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36935 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 143 connection: 172.17.0.2:34464 deadline: 1731026928891, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms 2024-11-08T00:48:38,894 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f., hostname=c302dddf724e,36935,1731026906786, seqNum=-1 , the old value is region=TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f., hostname=c302dddf724e,36935,1731026906786, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - 
wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-08T00:48:38,894 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f., hostname=c302dddf724e,36935,1731026906786, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-08T00:48:38,894 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f., hostname=c302dddf724e,36935,1731026906786, seqNum=-1 because the exception is null or not the one we care about 2024-11-08T00:48:38,894 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 12000000000ns which would exceed the timeout. We should throw instead. 
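The QuotaCache dump a few records above shows the default namespace limiter as TimeBasedLimiter( writeReqs=AverageIntervalRateLimiter(avail=5 limit=5 tunit=60000) readReqs=AverageIntervalRateLimiter(avail=6 limit=6 tunit=60000)), and the subsequent Mutate call is rejected with "number of write requests exceeded - wait 12sec, 0ms". The 12-second figure is consistent with one average interval, 60000 ms / 5 permits. The sketch below models only that arithmetic; it is not the HBase AverageIntervalRateLimiter class, and the refill policy shown is a simplifying assumption.

// Simplified model of an average-interval limiter like the one shown in the
// QuotaCache dump above (limit=5 writes per tunit=60000 ms). Illustration of the
// arithmetic only, not the HBase AverageIntervalRateLimiter implementation.
public class AverageIntervalLimiterSketch {

    private final long limit;        // permits per period
    private final long periodMillis; // period length in ms
    private long available;          // permits currently available
    private long lastRefillMillis;   // time of the last refill

    AverageIntervalLimiterSketch(long limit, long periodMillis, long nowMillis) {
        this.limit = limit;
        this.periodMillis = periodMillis;
        this.available = limit;
        this.lastRefillMillis = nowMillis;
    }

    // Try to consume one permit; on failure return a suggested wait in ms,
    // roughly one average interval (period / limit).
    long tryConsume(long nowMillis) {
        long elapsed = nowMillis - lastRefillMillis;
        long refill = elapsed * limit / periodMillis; // permits earned since last refill (assumed policy)
        if (refill > 0) {
            available = Math.min(limit, available + refill);
            lastRefillMillis = nowMillis;
        }
        if (available > 0) {
            available--;
            return 0; // allowed
        }
        return periodMillis / limit; // e.g. 60000 / 5 = 12000 ms -> "wait 12sec"
    }

    public static void main(String[] args) {
        long now = 0;
        AverageIntervalLimiterSketch writes = new AverageIntervalLimiterSketch(5, 60_000, now);
        for (int i = 1; i <= 6; i++) {
            long wait = writes.tryConsume(now);
            System.out.println("write " + i + ": " + (wait == 0 ? "allowed" : "throttled, wait " + wait + " ms"));
        }
    }
}

Run as written, the sketch admits the first five writes and reports a 12000 ms wait for the sixth, matching the sequence of Mutate rejections logged here.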
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-08T00:48:38,894 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(86): put failed after nRetries=5 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-11-08T00:48:38.894Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.put(TableOverAsyncTable.java:213) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doPuts(ThrottleQuotaTestUtil.java:81) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doPuts(ThrottleQuotaTestUtil.java:64) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testNamespaceClusterScopeQuota(TestClusterScopeQuotaThrottle.java:128) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-08T00:48:38,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36935 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestQuotaAdmin0 numWrites=0 numReads=1: number of read requests exceeded - wait 10sec, 0ms 2024-11-08T00:48:38,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36935 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Get size: 115 connection: 172.17.0.2:34464 deadline: 1731026928908, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms 2024-11-08T00:48:38,910 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f., hostname=c302dddf724e,36935,1731026906786, seqNum=-1 , the old value is region=TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f., hostname=c302dddf724e,36935,1731026906786, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-08T00:48:38,910 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f., hostname=c302dddf724e,36935,1731026906786, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-08T00:48:38,910 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f., hostname=c302dddf724e,36935,1731026906786, seqNum=-1 because the exception is null or not the one we care about 2024-11-08T00:48:38,910 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 10000000000ns which would exceed the timeout. We should throw instead. 
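The write-side traces above, and the read-side exception dump that continues below, come from ThrottleQuotaTestUtil's put/get helpers giving up once the namespace throttle kicks in ("put failed after nRetries=5", "get failed after nRetries=6"). For orientation only, a minimal client-side probe in the same spirit could look like the sketch below. This is a hedged illustration, not the test utility's actual source; the class and method names (ThrottledPutProbe, doPuts) are invented for the example, and it assumes a reachable cluster with a TestQuotaAdmin0 table that has a cf family.

// Illustrative sketch only (not ThrottleQuotaTestUtil itself): issue puts until the
// write throttle surfaces as a RetriesExhaustedException, as seen in the log above.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RetriesExhaustedException;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ThrottledPutProbe {
  // Attempts up to maxPuts single-row puts and returns how many succeeded
  // before the quota throttle stopped us.
  static int doPuts(Connection conn, TableName table, int maxPuts) throws IOException {
    int done = 0;
    try (Table t = conn.getTable(table)) {
      for (int i = 0; i < maxPuts; i++) {
        Put p = new Put(Bytes.toBytes("row-" + i));
        p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("data"));
        try {
          t.put(p);
          done++;
        } catch (RetriesExhaustedException e) {
          // In the log above this wraps an RpcThrottlingException carrying the
          // suggested wait ("wait 12sec, 0ms"); a test helper logs it and stops counting.
          break;
        }
      }
    }
    return done;
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      System.out.println("puts before throttle: "
          + doPuts(conn, TableName.valueOf("TestQuotaAdmin0"), 100));
    }
  }
}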
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-08T00:48:38,911 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=6 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-11-08T00:48:38.910Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testNamespaceClusterScopeQuota(TestClusterScopeQuotaThrottle.java:129) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-08T00:48:39,169 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T00:48:39,169 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-08T00:48:39,169 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731062913446 bypass), TestNs=QuotaState(ts=1731062913446 bypass)} 2024-11-08T00:48:39,169 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1731062913446 bypass), TestQuotaAdmin2=QuotaState(ts=1731062913446 bypass), TestQuotaAdmin1=QuotaState(ts=1731062913446 bypass)} 2024-11-08T00:48:39,169 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {} 2024-11-08T00:48:39,169 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731062913446 bypass)} 2024-11-08T00:48:39,419 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T00:48:39,420 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-08T00:48:39,420 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731062913446 bypass), TestNs=QuotaState(ts=1731062913446 bypass)} 2024-11-08T00:48:39,420 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1731062913446 bypass), TestNs:TestTable=QuotaState(ts=1731062913446 bypass)} 2024-11-08T00:48:39,420 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731062913446 bypass)} 2024-11-08T00:48:39,420 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731062913446 bypass)} 2024-11-08T00:48:39,432 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: quotas.TestClusterScopeQuotaThrottle#testNamespaceClusterScopeQuota Thread=301 (was 301), OpenFileDescriptor=537 (was 537), 
MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=658 (was 658), ProcessCount=11 (was 11), AvailableMemoryMB=2408 (was 2416) 2024-11-08T00:48:39,437 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46149 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable TestQuotaAdmin0 2024-11-08T00:48:39,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46149 {}] procedure2.ProcedureExecutor(1139): Stored pid=22, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=TestQuotaAdmin0 2024-11-08T00:48:39,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=22 2024-11-08T00:48:39,449 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin0","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731026919449"}]},"ts":"1731026919449"} 2024-11-08T00:48:39,451 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin0, state=DISABLING in hbase:meta 2024-11-08T00:48:39,451 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestQuotaAdmin0 to state=DISABLING 2024-11-08T00:48:39,453 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=TestQuotaAdmin0}] 2024-11-08T00:48:39,458 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=24, ppid=23, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin0, region=9ddb6d45779c6bfeec462c7bd486822f, UNASSIGN}] 2024-11-08T00:48:39,459 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=24, ppid=23, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin0, region=9ddb6d45779c6bfeec462c7bd486822f, UNASSIGN 2024-11-08T00:48:39,460 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=9ddb6d45779c6bfeec462c7bd486822f, regionState=CLOSING, regionLocation=c302dddf724e,36935,1731026906786 2024-11-08T00:48:39,463 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=24, ppid=23, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin0, region=9ddb6d45779c6bfeec462c7bd486822f, UNASSIGN because future has completed 2024-11-08T00:48:39,463 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-08T00:48:39,463 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9ddb6d45779c6bfeec462c7bd486822f, server=c302dddf724e,36935,1731026906786}] 2024-11-08T00:48:39,620 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] handler.UnassignRegionHandler(122): Close 9ddb6d45779c6bfeec462c7bd486822f 2024-11-08T00:48:39,620 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-08T00:48:39,621 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1722): Closing 
9ddb6d45779c6bfeec462c7bd486822f, disabling compactions & flushes 2024-11-08T00:48:39,621 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1755): Closing region TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f. 2024-11-08T00:48:39,621 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1776): Time limited wait for close lock on TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f. 2024-11-08T00:48:39,621 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1843): Acquired close lock on TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f. after waiting 0 ms 2024-11-08T00:48:39,621 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1853): Updates disabled for region TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f. 2024-11-08T00:48:39,625 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(2902): Flushing 9ddb6d45779c6bfeec462c7bd486822f 1/1 column families, dataSize=578 B heapSize=2.11 KB 2024-11-08T00:48:39,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=22 2024-11-08T00:48:39,709 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/default/TestQuotaAdmin0/9ddb6d45779c6bfeec462c7bd486822f/.tmp/cf/5f2ff3cfd0e14130b84683c5694b9b52 is 38, key is row-0/cf:q/1731026918874/Put/seqid=0 2024-11-08T00:48:39,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44001 is added to blk_1073741847_1023 (size=4967) 2024-11-08T00:48:39,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33807 is added to blk_1073741847_1023 (size=4967) 2024-11-08T00:48:39,734 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=578 B at sequenceid=21 (bloomFilter=false), to=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/default/TestQuotaAdmin0/9ddb6d45779c6bfeec462c7bd486822f/.tmp/cf/5f2ff3cfd0e14130b84683c5694b9b52 2024-11-08T00:48:39,801 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/default/TestQuotaAdmin0/9ddb6d45779c6bfeec462c7bd486822f/.tmp/cf/5f2ff3cfd0e14130b84683c5694b9b52 as hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/default/TestQuotaAdmin0/9ddb6d45779c6bfeec462c7bd486822f/cf/5f2ff3cfd0e14130b84683c5694b9b52 2024-11-08T00:48:39,812 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/default/TestQuotaAdmin0/9ddb6d45779c6bfeec462c7bd486822f/cf/5f2ff3cfd0e14130b84683c5694b9b52, entries=6, sequenceid=21, filesize=4.9 K 
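The surrounding lines record the test's cleanup of TestQuotaAdmin0: the master stores a DisableTableProcedure (pid=22), region 9ddb6d45779c6bfeec462c7bd486822f is flushed and closed, the table is marked DISABLED in hbase:meta, and a DeleteTableProcedure (pid=26) archives the region directory. As a hedged sketch only, the client-side Admin calls that drive this server-side sequence might look like the following; the class name DropTestTable is invented for the example and a reachable cluster is assumed.

// Illustrative sketch only: Admin calls whose server-side effect (DisableTableProcedure,
// CloseTableRegionsProcedure, DeleteTableProcedure) is what this part of the log records.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestTable {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestQuotaAdmin0");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table); // master flushes and closes the table's regions
        }
        admin.deleteTable(table);    // master removes the table and archives its region dirs
      }
    }
  }
}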
2024-11-08T00:48:39,819 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(3140): Finished flush of dataSize ~578 B/578, heapSize ~2.09 KB/2144, currentSize=0 B/0 for 9ddb6d45779c6bfeec462c7bd486822f in 193ms, sequenceid=21, compaction requested=false 2024-11-08T00:48:39,826 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/default/TestQuotaAdmin0/9ddb6d45779c6bfeec462c7bd486822f/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-08T00:48:39,829 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1973): Closed TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f. 2024-11-08T00:48:39,829 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1676): Region close journal for 9ddb6d45779c6bfeec462c7bd486822f: Waiting for close lock at 1731026919621Running coprocessor pre-close hooks at 1731026919621Disabling compacts and flushes for region at 1731026919621Disabling writes for close at 1731026919621Obtaining lock to block concurrent updates at 1731026919625 (+4 ms)Preparing flush snapshotting stores in 9ddb6d45779c6bfeec462c7bd486822f at 1731026919625Finished memstore snapshotting TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f., syncing WAL and waiting on mvcc, flushsize=dataSize=578, getHeapSize=2144, getOffHeapSize=0, getCellsCount=17 at 1731026919636 (+11 ms)Flushing stores of TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f. at 1731026919637 (+1 ms)Flushing 9ddb6d45779c6bfeec462c7bd486822f/cf: creating writer at 1731026919644 (+7 ms)Flushing 9ddb6d45779c6bfeec462c7bd486822f/cf: appending metadata at 1731026919700 (+56 ms)Flushing 9ddb6d45779c6bfeec462c7bd486822f/cf: closing flushed file at 1731026919703 (+3 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@286a8b2e: reopening flushed file at 1731026919799 (+96 ms)Finished flush of dataSize ~578 B/578, heapSize ~2.09 KB/2144, currentSize=0 B/0 for 9ddb6d45779c6bfeec462c7bd486822f in 193ms, sequenceid=21, compaction requested=false at 1731026919819 (+20 ms)Writing region close event to WAL at 1731026919821 (+2 ms)Running coprocessor post-close hooks at 1731026919827 (+6 ms)Closed at 1731026919829 (+2 ms) 2024-11-08T00:48:39,832 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] handler.UnassignRegionHandler(157): Closed 9ddb6d45779c6bfeec462c7bd486822f 2024-11-08T00:48:39,833 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=9ddb6d45779c6bfeec462c7bd486822f, regionState=CLOSED 2024-11-08T00:48:39,835 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=25, ppid=24, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9ddb6d45779c6bfeec462c7bd486822f, server=c302dddf724e,36935,1731026906786 because future has completed 2024-11-08T00:48:39,839 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=25, resume processing ppid=24 2024-11-08T00:48:39,839 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=25, ppid=24, state=SUCCESS, hasLock=false; CloseRegionProcedure 9ddb6d45779c6bfeec462c7bd486822f, server=c302dddf724e,36935,1731026906786 
in 373 msec 2024-11-08T00:48:39,842 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=24, resume processing ppid=23 2024-11-08T00:48:39,842 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=24, ppid=23, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin0, region=9ddb6d45779c6bfeec462c7bd486822f, UNASSIGN in 381 msec 2024-11-08T00:48:39,846 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=23, resume processing ppid=22 2024-11-08T00:48:39,846 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=23, ppid=22, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=TestQuotaAdmin0 in 390 msec 2024-11-08T00:48:39,849 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin0","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731026919849"}]},"ts":"1731026919849"} 2024-11-08T00:48:39,851 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin0, state=DISABLED in hbase:meta 2024-11-08T00:48:39,851 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestQuotaAdmin0 to state=DISABLED 2024-11-08T00:48:39,855 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=22, state=SUCCESS, hasLock=false; DisableTableProcedure table=TestQuotaAdmin0 in 413 msec 2024-11-08T00:48:40,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=22 2024-11-08T00:48:40,217 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:TestQuotaAdmin0 completed 2024-11-08T00:48:40,222 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46149 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete TestQuotaAdmin0 2024-11-08T00:48:40,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46149 {}] procedure2.ProcedureExecutor(1139): Stored pid=26, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=TestQuotaAdmin0 2024-11-08T00:48:40,230 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=26, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin0 2024-11-08T00:48:40,232 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=26, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin0 2024-11-08T00:48:40,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=26 2024-11-08T00:48:40,240 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/default/TestQuotaAdmin0/9ddb6d45779c6bfeec462c7bd486822f 2024-11-08T00:48:40,246 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/default/TestQuotaAdmin0/9ddb6d45779c6bfeec462c7bd486822f/cf, FileablePath, hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/default/TestQuotaAdmin0/9ddb6d45779c6bfeec462c7bd486822f/recovered.edits] 2024-11-08T00:48:40,258 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from 
FileablePath, hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/default/TestQuotaAdmin0/9ddb6d45779c6bfeec462c7bd486822f/cf/5f2ff3cfd0e14130b84683c5694b9b52 to hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/archive/data/default/TestQuotaAdmin0/9ddb6d45779c6bfeec462c7bd486822f/cf/5f2ff3cfd0e14130b84683c5694b9b52 2024-11-08T00:48:40,264 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/default/TestQuotaAdmin0/9ddb6d45779c6bfeec462c7bd486822f/recovered.edits/24.seqid to hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/archive/data/default/TestQuotaAdmin0/9ddb6d45779c6bfeec462c7bd486822f/recovered.edits/24.seqid 2024-11-08T00:48:40,265 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/default/TestQuotaAdmin0/9ddb6d45779c6bfeec462c7bd486822f 2024-11-08T00:48:40,266 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestQuotaAdmin0 regions 2024-11-08T00:48:40,272 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=26, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin0 2024-11-08T00:48:40,276 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44401 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-08T00:48:40,280 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestQuotaAdmin0 from hbase:meta 2024-11-08T00:48:40,283 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestQuotaAdmin0' descriptor. 2024-11-08T00:48:40,285 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=26, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin0 2024-11-08T00:48:40,285 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestQuotaAdmin0' from region states. 2024-11-08T00:48:40,285 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731026920285"}]},"ts":"9223372036854775807"} 2024-11-08T00:48:40,288 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-11-08T00:48:40,288 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 9ddb6d45779c6bfeec462c7bd486822f, NAME => 'TestQuotaAdmin0,,1731026909487.9ddb6d45779c6bfeec462c7bd486822f.', STARTKEY => '', ENDKEY => ''}] 2024-11-08T00:48:40,288 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestQuotaAdmin0' as deleted. 
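Note on the disable/delete sequence: the DisableTableProcedure and DeleteTableProcedure pairs in this run (for TestQuotaAdmin0 here, and for the other tables below) are triggered by client-side Admin calls. A minimal, hypothetical sketch of that call sequence, assuming an already-open Connection supplied by the caller, is:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    public final class DropTableSample {
      // Disables (if needed) and deletes one table through the Admin API; "conn" is an
      // already-open org.apache.hadoop.hbase.client.Connection supplied by the caller.
      public static void dropTable(Connection conn, String name) throws IOException {
        TableName tn = TableName.valueOf(name);
        try (Admin admin = conn.getAdmin()) {
          if (admin.tableExists(tn)) {
            if (admin.isTableEnabled(tn)) {
              admin.disableTable(tn); // master logs "disable <table>" and runs a DisableTableProcedure
            }
            admin.deleteTable(tn);    // master logs "delete <table>" and runs a DeleteTableProcedure
          }
        }
      }
    }
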
2024-11-08T00:48:40,288 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"TestQuotaAdmin0","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731026920288"}]},"ts":"9223372036854775807"} 2024-11-08T00:48:40,290 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table TestQuotaAdmin0 state from META 2024-11-08T00:48:40,291 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=26, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin0 2024-11-08T00:48:40,293 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=26, state=SUCCESS, hasLock=false; DeleteTableProcedure table=TestQuotaAdmin0 in 69 msec 2024-11-08T00:48:40,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=26 2024-11-08T00:48:40,498 DEBUG [RPCClient-NioEventLoopGroup-5-4 {}] client.AsyncRegionLocator(219): Clear meta cache for TestQuotaAdmin0 2024-11-08T00:48:40,499 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:TestQuotaAdmin0 completed 2024-11-08T00:48:40,499 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46149 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable TestQuotaAdmin1 2024-11-08T00:48:40,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46149 {}] procedure2.ProcedureExecutor(1139): Stored pid=27, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=TestQuotaAdmin1 2024-11-08T00:48:40,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=27 2024-11-08T00:48:40,504 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731026920504"}]},"ts":"1731026920504"} 2024-11-08T00:48:40,506 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin1, state=DISABLING in hbase:meta 2024-11-08T00:48:40,506 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestQuotaAdmin1 to state=DISABLING 2024-11-08T00:48:40,507 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=28, ppid=27, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=TestQuotaAdmin1}] 2024-11-08T00:48:40,509 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin1, region=0fa836ad0e77c5a1d34a9799d89291a2, UNASSIGN}] 2024-11-08T00:48:40,510 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin1, region=0fa836ad0e77c5a1d34a9799d89291a2, UNASSIGN 2024-11-08T00:48:40,511 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=29 updating hbase:meta row=0fa836ad0e77c5a1d34a9799d89291a2, regionState=CLOSING, regionLocation=c302dddf724e,44401,1731026906683 2024-11-08T00:48:40,513 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=29, ppid=28, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, 
hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin1, region=0fa836ad0e77c5a1d34a9799d89291a2, UNASSIGN because future has completed 2024-11-08T00:48:40,514 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-08T00:48:40,514 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=30, ppid=29, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0fa836ad0e77c5a1d34a9799d89291a2, server=c302dddf724e,44401,1731026906683}] 2024-11-08T00:48:40,668 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] handler.UnassignRegionHandler(122): Close 0fa836ad0e77c5a1d34a9799d89291a2 2024-11-08T00:48:40,668 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-08T00:48:40,668 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1722): Closing 0fa836ad0e77c5a1d34a9799d89291a2, disabling compactions & flushes 2024-11-08T00:48:40,668 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1755): Closing region TestQuotaAdmin1,,1731026910333.0fa836ad0e77c5a1d34a9799d89291a2. 2024-11-08T00:48:40,668 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1776): Time limited wait for close lock on TestQuotaAdmin1,,1731026910333.0fa836ad0e77c5a1d34a9799d89291a2. 2024-11-08T00:48:40,668 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1843): Acquired close lock on TestQuotaAdmin1,,1731026910333.0fa836ad0e77c5a1d34a9799d89291a2. after waiting 0 ms 2024-11-08T00:48:40,668 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1853): Updates disabled for region TestQuotaAdmin1,,1731026910333.0fa836ad0e77c5a1d34a9799d89291a2. 2024-11-08T00:48:40,681 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/default/TestQuotaAdmin1/0fa836ad0e77c5a1d34a9799d89291a2/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-08T00:48:40,682 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1973): Closed TestQuotaAdmin1,,1731026910333.0fa836ad0e77c5a1d34a9799d89291a2. 
2024-11-08T00:48:40,682 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1676): Region close journal for 0fa836ad0e77c5a1d34a9799d89291a2: Waiting for close lock at 1731026920668Running coprocessor pre-close hooks at 1731026920668Disabling compacts and flushes for region at 1731026920668Disabling writes for close at 1731026920668Writing region close event to WAL at 1731026920669 (+1 ms)Running coprocessor post-close hooks at 1731026920682 (+13 ms)Closed at 1731026920682 2024-11-08T00:48:40,686 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] handler.UnassignRegionHandler(157): Closed 0fa836ad0e77c5a1d34a9799d89291a2 2024-11-08T00:48:40,687 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=29 updating hbase:meta row=0fa836ad0e77c5a1d34a9799d89291a2, regionState=CLOSED 2024-11-08T00:48:40,691 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=30, ppid=29, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0fa836ad0e77c5a1d34a9799d89291a2, server=c302dddf724e,44401,1731026906683 because future has completed 2024-11-08T00:48:40,695 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=30, resume processing ppid=29 2024-11-08T00:48:40,695 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=30, ppid=29, state=SUCCESS, hasLock=false; CloseRegionProcedure 0fa836ad0e77c5a1d34a9799d89291a2, server=c302dddf724e,44401,1731026906683 in 178 msec 2024-11-08T00:48:40,699 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=29, resume processing ppid=28 2024-11-08T00:48:40,699 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=29, ppid=28, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin1, region=0fa836ad0e77c5a1d34a9799d89291a2, UNASSIGN in 186 msec 2024-11-08T00:48:40,703 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=28, resume processing ppid=27 2024-11-08T00:48:40,703 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=28, ppid=27, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=TestQuotaAdmin1 in 193 msec 2024-11-08T00:48:40,705 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731026920705"}]},"ts":"1731026920705"} 2024-11-08T00:48:40,708 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin1, state=DISABLED in hbase:meta 2024-11-08T00:48:40,708 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestQuotaAdmin1 to state=DISABLED 2024-11-08T00:48:40,711 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=27, state=SUCCESS, hasLock=false; DisableTableProcedure table=TestQuotaAdmin1 in 210 msec 2024-11-08T00:48:40,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=27 2024-11-08T00:48:40,757 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:TestQuotaAdmin1 completed 2024-11-08T00:48:40,758 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46149 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete TestQuotaAdmin1 2024-11-08T00:48:40,759 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46149 {}] procedure2.ProcedureExecutor(1139): Stored pid=31, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=TestQuotaAdmin1 2024-11-08T00:48:40,761 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=31, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin1 2024-11-08T00:48:40,762 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=31, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin1 2024-11-08T00:48:40,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-11-08T00:48:40,766 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/default/TestQuotaAdmin1/0fa836ad0e77c5a1d34a9799d89291a2 2024-11-08T00:48:40,771 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/default/TestQuotaAdmin1/0fa836ad0e77c5a1d34a9799d89291a2/cf, FileablePath, hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/default/TestQuotaAdmin1/0fa836ad0e77c5a1d34a9799d89291a2/recovered.edits] 2024-11-08T00:48:40,786 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/default/TestQuotaAdmin1/0fa836ad0e77c5a1d34a9799d89291a2/recovered.edits/4.seqid to hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/archive/data/default/TestQuotaAdmin1/0fa836ad0e77c5a1d34a9799d89291a2/recovered.edits/4.seqid 2024-11-08T00:48:40,787 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/default/TestQuotaAdmin1/0fa836ad0e77c5a1d34a9799d89291a2 2024-11-08T00:48:40,788 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestQuotaAdmin1 regions 2024-11-08T00:48:40,792 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=31, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin1 2024-11-08T00:48:40,796 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestQuotaAdmin1 from hbase:meta 2024-11-08T00:48:40,799 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestQuotaAdmin1' descriptor. 2024-11-08T00:48:40,801 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=31, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin1 2024-11-08T00:48:40,802 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestQuotaAdmin1' from region states. 
2024-11-08T00:48:40,802 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"TestQuotaAdmin1,,1731026910333.0fa836ad0e77c5a1d34a9799d89291a2.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731026920802"}]},"ts":"9223372036854775807"} 2024-11-08T00:48:40,805 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-11-08T00:48:40,805 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 0fa836ad0e77c5a1d34a9799d89291a2, NAME => 'TestQuotaAdmin1,,1731026910333.0fa836ad0e77c5a1d34a9799d89291a2.', STARTKEY => '', ENDKEY => ''}] 2024-11-08T00:48:40,805 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestQuotaAdmin1' as deleted. 2024-11-08T00:48:40,806 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"TestQuotaAdmin1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731026920805"}]},"ts":"9223372036854775807"} 2024-11-08T00:48:40,809 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table TestQuotaAdmin1 state from META 2024-11-08T00:48:40,810 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=31, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin1 2024-11-08T00:48:40,813 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=31, state=SUCCESS, hasLock=false; DeleteTableProcedure table=TestQuotaAdmin1 in 53 msec 2024-11-08T00:48:41,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-11-08T00:48:41,028 DEBUG [RPCClient-NioEventLoopGroup-5-4 {}] client.AsyncRegionLocator(219): Clear meta cache for TestQuotaAdmin1 2024-11-08T00:48:41,028 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:TestQuotaAdmin1 completed 2024-11-08T00:48:41,028 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46149 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable TestQuotaAdmin2 2024-11-08T00:48:41,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46149 {}] procedure2.ProcedureExecutor(1139): Stored pid=32, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=TestQuotaAdmin2 2024-11-08T00:48:41,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=32 2024-11-08T00:48:41,034 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin2","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731026921033"}]},"ts":"1731026921033"} 2024-11-08T00:48:41,035 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin2, state=DISABLING in hbase:meta 2024-11-08T00:48:41,035 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestQuotaAdmin2 to state=DISABLING 2024-11-08T00:48:41,036 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=TestQuotaAdmin2}] 2024-11-08T00:48:41,039 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure 
table=TestQuotaAdmin2, region=5722bdba1189d7abc2c73c96dd29b2f1, UNASSIGN}] 2024-11-08T00:48:41,040 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin2, region=5722bdba1189d7abc2c73c96dd29b2f1, UNASSIGN 2024-11-08T00:48:41,040 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=5722bdba1189d7abc2c73c96dd29b2f1, regionState=CLOSING, regionLocation=c302dddf724e,44401,1731026906683 2024-11-08T00:48:41,041 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46149 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=c302dddf724e,44401,1731026906683, table=TestQuotaAdmin2, region=5722bdba1189d7abc2c73c96dd29b2f1. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-08T00:48:41,043 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin2, region=5722bdba1189d7abc2c73c96dd29b2f1, UNASSIGN because future has completed 2024-11-08T00:48:41,043 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-08T00:48:41,043 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5722bdba1189d7abc2c73c96dd29b2f1, server=c302dddf724e,44401,1731026906683}] 2024-11-08T00:48:41,196 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(122): Close 5722bdba1189d7abc2c73c96dd29b2f1 2024-11-08T00:48:41,196 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-08T00:48:41,196 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1722): Closing 5722bdba1189d7abc2c73c96dd29b2f1, disabling compactions & flushes 2024-11-08T00:48:41,196 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1755): Closing region TestQuotaAdmin2,,1731026911131.5722bdba1189d7abc2c73c96dd29b2f1. 2024-11-08T00:48:41,196 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1776): Time limited wait for close lock on TestQuotaAdmin2,,1731026911131.5722bdba1189d7abc2c73c96dd29b2f1. 2024-11-08T00:48:41,196 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1843): Acquired close lock on TestQuotaAdmin2,,1731026911131.5722bdba1189d7abc2c73c96dd29b2f1. after waiting 0 ms 2024-11-08T00:48:41,196 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1853): Updates disabled for region TestQuotaAdmin2,,1731026911131.5722bdba1189d7abc2c73c96dd29b2f1. 
2024-11-08T00:48:41,202 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/default/TestQuotaAdmin2/5722bdba1189d7abc2c73c96dd29b2f1/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-08T00:48:41,203 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1973): Closed TestQuotaAdmin2,,1731026911131.5722bdba1189d7abc2c73c96dd29b2f1. 2024-11-08T00:48:41,203 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1676): Region close journal for 5722bdba1189d7abc2c73c96dd29b2f1: Waiting for close lock at 1731026921196Running coprocessor pre-close hooks at 1731026921196Disabling compacts and flushes for region at 1731026921196Disabling writes for close at 1731026921196Writing region close event to WAL at 1731026921197 (+1 ms)Running coprocessor post-close hooks at 1731026921203 (+6 ms)Closed at 1731026921203 2024-11-08T00:48:41,205 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(157): Closed 5722bdba1189d7abc2c73c96dd29b2f1 2024-11-08T00:48:41,206 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=5722bdba1189d7abc2c73c96dd29b2f1, regionState=CLOSED 2024-11-08T00:48:41,208 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=35, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5722bdba1189d7abc2c73c96dd29b2f1, server=c302dddf724e,44401,1731026906683 because future has completed 2024-11-08T00:48:41,211 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=35, resume processing ppid=34 2024-11-08T00:48:41,212 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=35, ppid=34, state=SUCCESS, hasLock=false; CloseRegionProcedure 5722bdba1189d7abc2c73c96dd29b2f1, server=c302dddf724e,44401,1731026906683 in 166 msec 2024-11-08T00:48:41,214 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=34, resume processing ppid=33 2024-11-08T00:48:41,214 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=34, ppid=33, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin2, region=5722bdba1189d7abc2c73c96dd29b2f1, UNASSIGN in 173 msec 2024-11-08T00:48:41,217 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=33, resume processing ppid=32 2024-11-08T00:48:41,217 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=33, ppid=32, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=TestQuotaAdmin2 in 179 msec 2024-11-08T00:48:41,219 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin2","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731026921219"}]},"ts":"1731026921219"} 2024-11-08T00:48:41,221 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin2, state=DISABLED in hbase:meta 2024-11-08T00:48:41,221 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestQuotaAdmin2 to state=DISABLED 2024-11-08T00:48:41,224 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=32, state=SUCCESS, hasLock=false; DisableTableProcedure table=TestQuotaAdmin2 in 194 
msec 2024-11-08T00:48:41,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=32 2024-11-08T00:48:41,288 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:TestQuotaAdmin2 completed 2024-11-08T00:48:41,288 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46149 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete TestQuotaAdmin2 2024-11-08T00:48:41,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46149 {}] procedure2.ProcedureExecutor(1139): Stored pid=36, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=TestQuotaAdmin2 2024-11-08T00:48:41,291 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=36, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin2 2024-11-08T00:48:41,292 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=36, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin2 2024-11-08T00:48:41,295 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/default/TestQuotaAdmin2/5722bdba1189d7abc2c73c96dd29b2f1 2024-11-08T00:48:41,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-08T00:48:41,298 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/default/TestQuotaAdmin2/5722bdba1189d7abc2c73c96dd29b2f1/cf, FileablePath, hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/default/TestQuotaAdmin2/5722bdba1189d7abc2c73c96dd29b2f1/recovered.edits] 2024-11-08T00:48:41,306 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/default/TestQuotaAdmin2/5722bdba1189d7abc2c73c96dd29b2f1/recovered.edits/4.seqid to hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/archive/data/default/TestQuotaAdmin2/5722bdba1189d7abc2c73c96dd29b2f1/recovered.edits/4.seqid 2024-11-08T00:48:41,307 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/default/TestQuotaAdmin2/5722bdba1189d7abc2c73c96dd29b2f1 2024-11-08T00:48:41,307 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestQuotaAdmin2 regions 2024-11-08T00:48:41,311 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=36, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin2 2024-11-08T00:48:41,314 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestQuotaAdmin2 from hbase:meta 2024-11-08T00:48:41,317 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestQuotaAdmin2' descriptor. 
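Note on the HFileArchiver entries above: deleted regions' files are moved under archive/data/... rather than removed outright. A hypothetical way to inspect that archive directory with the plain Hadoop FileSystem API (the NameNode address and test-data path are copied from this log; both are generated per run and would differ elsewhere) is:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class ListArchiveSample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:36609"); // NameNode address copied from this log
        // Test-data root copied from this log; it is generated per run and will differ elsewhere.
        Path archived = new Path(
            "/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/archive/data/default");
        try (FileSystem fs = FileSystem.get(conf)) {
          for (FileStatus status : fs.listStatus(archived)) {
            System.out.println(status.getPath());           // one entry per archived table
          }
        }
      }
    }
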
2024-11-08T00:48:41,320 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=36, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin2 2024-11-08T00:48:41,320 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestQuotaAdmin2' from region states. 2024-11-08T00:48:41,320 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"TestQuotaAdmin2,,1731026911131.5722bdba1189d7abc2c73c96dd29b2f1.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731026921320"}]},"ts":"9223372036854775807"} 2024-11-08T00:48:41,323 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-11-08T00:48:41,323 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 5722bdba1189d7abc2c73c96dd29b2f1, NAME => 'TestQuotaAdmin2,,1731026911131.5722bdba1189d7abc2c73c96dd29b2f1.', STARTKEY => '', ENDKEY => ''}] 2024-11-08T00:48:41,323 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestQuotaAdmin2' as deleted. 2024-11-08T00:48:41,323 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"TestQuotaAdmin2","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731026921323"}]},"ts":"9223372036854775807"} 2024-11-08T00:48:41,325 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table TestQuotaAdmin2 state from META 2024-11-08T00:48:41,327 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=36, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin2 2024-11-08T00:48:41,328 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=36, state=SUCCESS, hasLock=false; DeleteTableProcedure table=TestQuotaAdmin2 in 39 msec 2024-11-08T00:48:41,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-08T00:48:41,557 DEBUG [RPCClient-NioEventLoopGroup-5-4 {}] client.AsyncRegionLocator(219): Clear meta cache for TestQuotaAdmin2 2024-11-08T00:48:41,557 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:TestQuotaAdmin2 completed 2024-11-08T00:48:41,558 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46149 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable TestNs:TestTable 2024-11-08T00:48:41,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46149 {}] procedure2.ProcedureExecutor(1139): Stored pid=37, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=TestNs:TestTable 2024-11-08T00:48:41,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-11-08T00:48:41,562 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestNs:TestTable","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731026921562"}]},"ts":"1731026921562"} 2024-11-08T00:48:41,564 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestNs:TestTable, state=DISABLING in hbase:meta 2024-11-08T00:48:41,564 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestNs:TestTable to state=DISABLING 2024-11-08T00:48:41,565 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=38, ppid=37, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=TestNs:TestTable}] 2024-11-08T00:48:41,567 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=9658ac49e4a052fe7d43286a103ecab9, UNASSIGN}, {pid=40, ppid=38, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=822a2b407be6bb38301bc44c8cd44212, UNASSIGN}] 2024-11-08T00:48:41,568 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=40, ppid=38, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=822a2b407be6bb38301bc44c8cd44212, UNASSIGN 2024-11-08T00:48:41,568 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=39, ppid=38, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=9658ac49e4a052fe7d43286a103ecab9, UNASSIGN 2024-11-08T00:48:41,569 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=822a2b407be6bb38301bc44c8cd44212, regionState=CLOSING, regionLocation=c302dddf724e,36935,1731026906786 2024-11-08T00:48:41,569 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=39 updating hbase:meta row=9658ac49e4a052fe7d43286a103ecab9, regionState=CLOSING, regionLocation=c302dddf724e,44401,1731026906683 2024-11-08T00:48:41,571 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=40, ppid=38, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestNs:TestTable, region=822a2b407be6bb38301bc44c8cd44212, UNASSIGN because future has completed 2024-11-08T00:48:41,571 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-08T00:48:41,571 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure 822a2b407be6bb38301bc44c8cd44212, server=c302dddf724e,36935,1731026906786}] 2024-11-08T00:48:41,572 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=39, ppid=38, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestNs:TestTable, region=9658ac49e4a052fe7d43286a103ecab9, UNASSIGN because future has completed 2024-11-08T00:48:41,572 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-08T00:48:41,572 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=42, ppid=39, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9658ac49e4a052fe7d43286a103ecab9, server=c302dddf724e,44401,1731026906683}] 2024-11-08T00:48:41,724 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(122): Close 822a2b407be6bb38301bc44c8cd44212 2024-11-08T00:48:41,724 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 
2024-11-08T00:48:41,725 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1722): Closing 822a2b407be6bb38301bc44c8cd44212, disabling compactions & flushes 2024-11-08T00:48:41,725 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1755): Closing region TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212. 2024-11-08T00:48:41,725 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1776): Time limited wait for close lock on TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212. 2024-11-08T00:48:41,725 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1843): Acquired close lock on TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212. after waiting 0 ms 2024-11-08T00:48:41,725 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1853): Updates disabled for region TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212. 2024-11-08T00:48:41,726 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(122): Close 9658ac49e4a052fe7d43286a103ecab9 2024-11-08T00:48:41,726 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-08T00:48:41,726 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1722): Closing 9658ac49e4a052fe7d43286a103ecab9, disabling compactions & flushes 2024-11-08T00:48:41,727 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1755): Closing region TestNs:TestTable,,1731026912199.9658ac49e4a052fe7d43286a103ecab9. 2024-11-08T00:48:41,727 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1776): Time limited wait for close lock on TestNs:TestTable,,1731026912199.9658ac49e4a052fe7d43286a103ecab9. 2024-11-08T00:48:41,727 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1843): Acquired close lock on TestNs:TestTable,,1731026912199.9658ac49e4a052fe7d43286a103ecab9. after waiting 0 ms 2024-11-08T00:48:41,727 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1853): Updates disabled for region TestNs:TestTable,,1731026912199.9658ac49e4a052fe7d43286a103ecab9. 2024-11-08T00:48:41,730 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/TestNs/TestTable/822a2b407be6bb38301bc44c8cd44212/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-08T00:48:41,731 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1973): Closed TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212. 
2024-11-08T00:48:41,731 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/TestNs/TestTable/9658ac49e4a052fe7d43286a103ecab9/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-08T00:48:41,731 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1676): Region close journal for 822a2b407be6bb38301bc44c8cd44212: Waiting for close lock at 1731026921725Running coprocessor pre-close hooks at 1731026921725Disabling compacts and flushes for region at 1731026921725Disabling writes for close at 1731026921725Writing region close event to WAL at 1731026921725Running coprocessor post-close hooks at 1731026921731 (+6 ms)Closed at 1731026921731 2024-11-08T00:48:41,732 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1973): Closed TestNs:TestTable,,1731026912199.9658ac49e4a052fe7d43286a103ecab9. 2024-11-08T00:48:41,732 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1676): Region close journal for 9658ac49e4a052fe7d43286a103ecab9: Waiting for close lock at 1731026921726Running coprocessor pre-close hooks at 1731026921726Disabling compacts and flushes for region at 1731026921726Disabling writes for close at 1731026921727 (+1 ms)Writing region close event to WAL at 1731026921727Running coprocessor post-close hooks at 1731026921732 (+5 ms)Closed at 1731026921732 2024-11-08T00:48:41,734 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(157): Closed 822a2b407be6bb38301bc44c8cd44212 2024-11-08T00:48:41,734 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=822a2b407be6bb38301bc44c8cd44212, regionState=CLOSED 2024-11-08T00:48:41,735 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(157): Closed 9658ac49e4a052fe7d43286a103ecab9 2024-11-08T00:48:41,736 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=39 updating hbase:meta row=9658ac49e4a052fe7d43286a103ecab9, regionState=CLOSED 2024-11-08T00:48:41,736 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=41, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure 822a2b407be6bb38301bc44c8cd44212, server=c302dddf724e,36935,1731026906786 because future has completed 2024-11-08T00:48:41,738 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=42, ppid=39, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9658ac49e4a052fe7d43286a103ecab9, server=c302dddf724e,44401,1731026906683 because future has completed 2024-11-08T00:48:41,740 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=41, resume processing ppid=40 2024-11-08T00:48:41,740 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=41, ppid=40, state=SUCCESS, hasLock=false; CloseRegionProcedure 822a2b407be6bb38301bc44c8cd44212, server=c302dddf724e,36935,1731026906786 in 166 msec 2024-11-08T00:48:41,742 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=42, resume processing ppid=39 2024-11-08T00:48:41,742 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=40, ppid=38, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=822a2b407be6bb38301bc44c8cd44212, UNASSIGN in 173 msec 2024-11-08T00:48:41,742 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=42, ppid=39, state=SUCCESS, hasLock=false; CloseRegionProcedure 9658ac49e4a052fe7d43286a103ecab9, server=c302dddf724e,44401,1731026906683 in 167 msec 2024-11-08T00:48:41,744 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=39, resume processing ppid=38 2024-11-08T00:48:41,744 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=39, ppid=38, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=9658ac49e4a052fe7d43286a103ecab9, UNASSIGN in 175 msec 2024-11-08T00:48:41,747 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=38, resume processing ppid=37 2024-11-08T00:48:41,747 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=38, ppid=37, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=TestNs:TestTable in 180 msec 2024-11-08T00:48:41,749 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestNs:TestTable","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731026921749"}]},"ts":"1731026921749"} 2024-11-08T00:48:41,751 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestNs:TestTable, state=DISABLED in hbase:meta 2024-11-08T00:48:41,751 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestNs:TestTable to state=DISABLED 2024-11-08T00:48:41,754 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=37, state=SUCCESS, hasLock=false; DisableTableProcedure table=TestNs:TestTable in 194 msec 2024-11-08T00:48:41,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-11-08T00:48:41,818 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: TestNs:TestTable completed 2024-11-08T00:48:41,818 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46149 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete TestNs:TestTable 2024-11-08T00:48:41,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46149 {}] procedure2.ProcedureExecutor(1139): Stored pid=43, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=TestNs:TestTable 2024-11-08T00:48:41,821 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=43, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=TestNs:TestTable 2024-11-08T00:48:41,822 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=43, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=TestNs:TestTable 2024-11-08T00:48:41,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=43 2024-11-08T00:48:41,826 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/TestNs/TestTable/9658ac49e4a052fe7d43286a103ecab9 2024-11-08T00:48:41,826 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/TestNs/TestTable/822a2b407be6bb38301bc44c8cd44212 2024-11-08T00:48:41,829 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/TestNs/TestTable/9658ac49e4a052fe7d43286a103ecab9/cf, FileablePath, hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/TestNs/TestTable/9658ac49e4a052fe7d43286a103ecab9/recovered.edits] 2024-11-08T00:48:41,829 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/TestNs/TestTable/822a2b407be6bb38301bc44c8cd44212/cf, FileablePath, hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/TestNs/TestTable/822a2b407be6bb38301bc44c8cd44212/recovered.edits] 2024-11-08T00:48:41,838 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/TestNs/TestTable/822a2b407be6bb38301bc44c8cd44212/recovered.edits/4.seqid to hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/archive/data/TestNs/TestTable/822a2b407be6bb38301bc44c8cd44212/recovered.edits/4.seqid 2024-11-08T00:48:41,838 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/TestNs/TestTable/9658ac49e4a052fe7d43286a103ecab9/recovered.edits/4.seqid to hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/archive/data/TestNs/TestTable/9658ac49e4a052fe7d43286a103ecab9/recovered.edits/4.seqid 2024-11-08T00:48:41,839 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/TestNs/TestTable/822a2b407be6bb38301bc44c8cd44212 2024-11-08T00:48:41,839 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/TestNs/TestTable/9658ac49e4a052fe7d43286a103ecab9 2024-11-08T00:48:41,839 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestNs:TestTable regions 2024-11-08T00:48:41,843 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=43, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=TestNs:TestTable 2024-11-08T00:48:41,847 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of TestNs:TestTable from hbase:meta 2024-11-08T00:48:41,850 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestNs:TestTable' descriptor. 2024-11-08T00:48:41,852 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=43, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=TestNs:TestTable 2024-11-08T00:48:41,852 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestNs:TestTable' from region states. 
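Note on the namespace cleanup: TestNs itself can only be removed once TestNs:TestTable is gone, which is what the DeleteNamespaceProcedure entries further below do. A hypothetical client-side sketch, again assuming an already-open Connection, is:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    public final class DropNamespaceSample {
      // Removes an (already empty) namespace; HBase rejects the call while any table,
      // such as TestNs:TestTable above, still exists in it.
      public static void dropNamespace(Connection conn, String ns) throws IOException {
        try (Admin admin = conn.getAdmin()) {
          admin.deleteNamespace(ns); // master runs the DeleteNamespaceProcedure seen below
        }
      }
    }
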
2024-11-08T00:48:41,852 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"TestNs:TestTable,,1731026912199.9658ac49e4a052fe7d43286a103ecab9.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731026921852"}]},"ts":"9223372036854775807"} 2024-11-08T00:48:41,853 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731026921852"}]},"ts":"9223372036854775807"} 2024-11-08T00:48:41,855 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-08T00:48:41,856 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 9658ac49e4a052fe7d43286a103ecab9, NAME => 'TestNs:TestTable,,1731026912199.9658ac49e4a052fe7d43286a103ecab9.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 822a2b407be6bb38301bc44c8cd44212, NAME => 'TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212.', STARTKEY => '1', ENDKEY => ''}] 2024-11-08T00:48:41,856 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestNs:TestTable' as deleted. 2024-11-08T00:48:41,856 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"TestNs:TestTable","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731026921856"}]},"ts":"9223372036854775807"} 2024-11-08T00:48:41,858 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table TestNs:TestTable state from META 2024-11-08T00:48:41,859 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=43, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=TestNs:TestTable 2024-11-08T00:48:41,860 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=43, state=SUCCESS, hasLock=false; DeleteTableProcedure table=TestNs:TestTable in 41 msec 2024-11-08T00:48:42,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=43 2024-11-08T00:48:42,087 DEBUG [RPCClient-NioEventLoopGroup-5-4 {}] client.AsyncRegionLocator(219): Clear meta cache for TestNs:TestTable 2024-11-08T00:48:42,088 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: TestNs:TestTable completed 2024-11-08T00:48:42,090 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46149 {}] master.HMaster$20(3601): Client=jenkins//172.17.0.2 delete TestNs 2024-11-08T00:48:42,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46149 {}] procedure2.ProcedureExecutor(1139): Stored pid=44, state=RUNNABLE:DELETE_NAMESPACE_PREPARE, hasLock=false; DeleteNamespaceProcedure, namespace=TestNs 2024-11-08T00:48:42,096 INFO [PEWorker-5 {}] procedure.DeleteNamespaceProcedure(67): pid=44, state=RUNNABLE:DELETE_NAMESPACE_PREPARE, hasLock=true; DeleteNamespaceProcedure, namespace=TestNs 2024-11-08T00:48:42,098 INFO [PEWorker-5 {}] procedure.DeleteNamespaceProcedure(67): pid=44, state=RUNNABLE:DELETE_NAMESPACE_DELETE_FROM_NS_TABLE, hasLock=true; DeleteNamespaceProcedure, namespace=TestNs 2024-11-08T00:48:42,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-08T00:48:42,100 INFO [PEWorker-5 {}] procedure.DeleteNamespaceProcedure(67): pid=44, 
state=RUNNABLE:DELETE_NAMESPACE_DELETE_DIRECTORIES, hasLock=true; DeleteNamespaceProcedure, namespace=TestNs 2024-11-08T00:48:42,102 INFO [PEWorker-5 {}] procedure.DeleteNamespaceProcedure(67): pid=44, state=RUNNABLE:DELETE_NAMESPACE_REMOVE_NAMESPACE_QUOTA, hasLock=true; DeleteNamespaceProcedure, namespace=TestNs 2024-11-08T00:48:42,104 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=44, state=SUCCESS, hasLock=false; DeleteNamespaceProcedure, namespace=TestNs in 12 msec 2024-11-08T00:48:42,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-08T00:48:42,357 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-08T00:48:42,357 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$NamespaceProcedureBiConsumer(2745): Operation: DELETE_NAMESPACE, Namespace: TestNs completed 2024-11-08T00:48:42,358 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-08T00:48:42,358 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.tearDownAfterClass(TestClusterScopeQuotaThrottle.java:107) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T00:48:42,363 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:48:42,363 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:48:42,363 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-08T00:48:42,364 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-08T00:48:42,364 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1284221299, stopped=false 2024-11-08T00:48:42,364 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.quotas.MasterQuotasObserver 2024-11-08T00:48:42,364 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c302dddf724e,46149,1731026905945 2024-11-08T00:48:42,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46149-0x100f50c52a90000, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T00:48:42,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44401-0x100f50c52a90001, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T00:48:42,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36935-0x100f50c52a90002, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T00:48:42,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44401-0x100f50c52a90001, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:48:42,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46149-0x100f50c52a90000, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:48:42,366 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-08T00:48:42,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36935-0x100f50c52a90002, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:48:42,367 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
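
[Editor's note] The entries above trace the end-of-test cleanup: the TestNs:TestTable disable and delete procedures (pid=37 and pid=43), the TestNs namespace delete (pid=44), and the start of the mini-cluster shutdown driven from TestClusterScopeQuotaThrottle.tearDownAfterClass. The sketch below is not the actual test source; it is a minimal reconstruction, assuming a static HBaseTestingUtil field and a JUnit 4 @AfterClass hook, of the client-side calls that would produce this sequence. Table and namespace names are taken from the log; everything else is assumed.

// Illustrative sketch only -- not the real TestClusterScopeQuotaThrottle code.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.junit.AfterClass;

public class QuotaThrottleTeardownSketch {
  // Assumed shared testing utility, as suggested by the HBaseTestingUtil frames in the log.
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
  private static final TableName TABLE_NAME = TableName.valueOf("TestNs:TestTable");

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
      if (admin.isTableEnabled(TABLE_NAME)) {
        admin.disableTable(TABLE_NAME);   // DisableTableProcedure (pid=37 above)
      }
      admin.deleteTable(TABLE_NAME);      // DeleteTableProcedure (pid=43 above)
      admin.deleteNamespace("TestNs");    // DeleteNamespaceProcedure (pid=44 above)
    }
    TEST_UTIL.shutdownMiniCluster();      // triggers the shutdown cascade logged below
  }
}

As the following entries show, shutdownMiniCluster() first closes the shared async connection and then asks the master to shut the cluster down, which in turn stops both region servers.
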
2024-11-08T00:48:42,367 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.tearDownAfterClass(TestClusterScopeQuotaThrottle.java:107) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T00:48:42,367 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:48:42,367 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46149-0x100f50c52a90000, quorum=127.0.0.1:63994, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:48:42,367 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44401-0x100f50c52a90001, quorum=127.0.0.1:63994, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:48:42,367 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c302dddf724e,44401,1731026906683' ***** 2024-11-08T00:48:42,367 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-08T00:48:42,368 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c302dddf724e,36935,1731026906786' ***** 2024-11-08T00:48:42,368 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-08T00:48:42,368 
DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36935-0x100f50c52a90002, quorum=127.0.0.1:63994, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:48:42,368 INFO [RS:0;c302dddf724e:44401 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-08T00:48:42,368 INFO [RS:1;c302dddf724e:36935 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-08T00:48:42,368 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-08T00:48:42,368 INFO [RS:1;c302dddf724e:36935 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-08T00:48:42,368 INFO [RS:0;c302dddf724e:44401 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-08T00:48:42,368 INFO [RS:1;c302dddf724e:36935 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-08T00:48:42,368 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-08T00:48:42,368 INFO [RS:0;c302dddf724e:44401 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-08T00:48:42,368 INFO [RS:1;c302dddf724e:36935 {}] regionserver.HRegionServer(959): stopping server c302dddf724e,36935,1731026906786 2024-11-08T00:48:42,368 INFO [RS:0;c302dddf724e:44401 {}] regionserver.HRegionServer(3091): Received CLOSE for 2f57c4e8f93fd2814d7a8ce1d4ced5a1 2024-11-08T00:48:42,368 INFO [RS:1;c302dddf724e:36935 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T00:48:42,369 INFO [RS:1;c302dddf724e:36935 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;c302dddf724e:36935. 
2024-11-08T00:48:42,369 DEBUG [RS:1;c302dddf724e:36935 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T00:48:42,369 DEBUG [RS:1;c302dddf724e:36935 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:48:42,369 INFO [RS:0;c302dddf724e:44401 {}] regionserver.HRegionServer(959): stopping server c302dddf724e,44401,1731026906683 2024-11-08T00:48:42,369 INFO [RS:0;c302dddf724e:44401 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T00:48:42,369 INFO [RS:1;c302dddf724e:36935 {}] regionserver.HRegionServer(976): stopping server c302dddf724e,36935,1731026906786; all regions closed. 2024-11-08T00:48:42,369 INFO [RS:0;c302dddf724e:44401 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c302dddf724e:44401. 2024-11-08T00:48:42,369 DEBUG [RS:1;c302dddf724e:36935 {}] quotas.QuotaCache(112): Stopping QuotaRefresherChore chore. 
2024-11-08T00:48:42,369 DEBUG [RS:0;c302dddf724e:44401 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T00:48:42,369 DEBUG [RS:0;c302dddf724e:44401 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:48:42,369 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 2f57c4e8f93fd2814d7a8ce1d4ced5a1, disabling compactions & flushes 2024-11-08T00:48:42,369 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:quota,,1731026908923.2f57c4e8f93fd2814d7a8ce1d4ced5a1. 2024-11-08T00:48:42,369 INFO [RS:0;c302dddf724e:44401 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-08T00:48:42,369 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:quota,,1731026908923.2f57c4e8f93fd2814d7a8ce1d4ced5a1. 2024-11-08T00:48:42,369 INFO [RS:0;c302dddf724e:44401 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-08T00:48:42,369 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:quota,,1731026908923.2f57c4e8f93fd2814d7a8ce1d4ced5a1. after waiting 0 ms 2024-11-08T00:48:42,369 INFO [RS:0;c302dddf724e:44401 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-08T00:48:42,369 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:quota,,1731026908923.2f57c4e8f93fd2814d7a8ce1d4ced5a1. 
2024-11-08T00:48:42,370 INFO [RS:0;c302dddf724e:44401 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-08T00:48:42,370 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 2f57c4e8f93fd2814d7a8ce1d4ced5a1 2/2 column families, dataSize=726 B heapSize=2.44 KB 2024-11-08T00:48:42,370 INFO [RS:0;c302dddf724e:44401 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-08T00:48:42,370 DEBUG [RS:0;c302dddf724e:44401 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 2f57c4e8f93fd2814d7a8ce1d4ced5a1=hbase:quota,,1731026908923.2f57c4e8f93fd2814d7a8ce1d4ced5a1.} 2024-11-08T00:48:42,370 DEBUG [RS_CLOSE_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-08T00:48:42,370 INFO [RS_CLOSE_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-08T00:48:42,370 DEBUG [RS_CLOSE_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-08T00:48:42,370 DEBUG [RS:0;c302dddf724e:44401 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 2f57c4e8f93fd2814d7a8ce1d4ced5a1 2024-11-08T00:48:42,370 DEBUG [RS_CLOSE_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-08T00:48:42,370 DEBUG [RS_CLOSE_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-08T00:48:42,371 INFO [RS_CLOSE_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=13.33 KB heapSize=24.55 KB 2024-11-08T00:48:42,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44001 is added to blk_1073741834_1010 (size=3488) 2024-11-08T00:48:42,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33807 is added to blk_1073741834_1010 (size=3488) 2024-11-08T00:48:42,379 DEBUG [RS:1;c302dddf724e:36935 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/oldWALs 2024-11-08T00:48:42,379 INFO [RS:1;c302dddf724e:36935 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL c302dddf724e%2C36935%2C1731026906786:(num 1731026908072) 2024-11-08T00:48:42,379 DEBUG [RS:1;c302dddf724e:36935 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:48:42,379 INFO [RS:1;c302dddf724e:36935 {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T00:48:42,379 INFO [RS:1;c302dddf724e:36935 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T00:48:42,379 INFO [RS:1;c302dddf724e:36935 {}] hbase.ChoreService(370): Chore service for: regionserver/c302dddf724e:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-08T00:48:42,380 INFO [RS:1;c302dddf724e:36935 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-08T00:48:42,380 INFO [RS:1;c302dddf724e:36935 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-08T00:48:42,380 INFO [RS:1;c302dddf724e:36935 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-08T00:48:42,380 INFO [regionserver/c302dddf724e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-08T00:48:42,380 INFO [RS:1;c302dddf724e:36935 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T00:48:42,380 INFO [RS:1;c302dddf724e:36935 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36935 2024-11-08T00:48:42,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36935-0x100f50c52a90002, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c302dddf724e,36935,1731026906786 2024-11-08T00:48:42,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46149-0x100f50c52a90000, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T00:48:42,384 INFO [RS:1;c302dddf724e:36935 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T00:48:42,387 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c302dddf724e,36935,1731026906786] 2024-11-08T00:48:42,389 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c302dddf724e,36935,1731026906786 already deleted, retry=false 2024-11-08T00:48:42,389 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c302dddf724e,36935,1731026906786 expired; onlineServers=1 2024-11-08T00:48:42,402 DEBUG [RS_CLOSE_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/meta/1588230740/.tmp/info/0040d3faead1408eaab3ecfdd25c5409 is 135, key is hbase:quota,,1731026908923.2f57c4e8f93fd2814d7a8ce1d4ced5a1./info:regioninfo/1731026909385/Put/seqid=0 2024-11-08T00:48:42,402 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/quota/2f57c4e8f93fd2814d7a8ce1d4ced5a1/.tmp/q/8be6cb50efe3408bb01a60d0874ccba9 is 44, key is u.jenkins/q:s.default:/1731026915194/DeleteColumn/seqid=0 2024-11-08T00:48:42,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33807 is added to blk_1073741849_1025 (size=5302) 2024-11-08T00:48:42,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44001 is added to blk_1073741849_1025 (size=5302) 2024-11-08T00:48:42,415 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=597 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/quota/2f57c4e8f93fd2814d7a8ce1d4ced5a1/.tmp/q/8be6cb50efe3408bb01a60d0874ccba9 2024-11-08T00:48:42,425 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom 
(CompoundBloomFilter) metadata for 8be6cb50efe3408bb01a60d0874ccba9 2024-11-08T00:48:42,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44001 is added to blk_1073741848_1024 (size=7362) 2024-11-08T00:48:42,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33807 is added to blk_1073741848_1024 (size=7362) 2024-11-08T00:48:42,427 INFO [regionserver/c302dddf724e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T00:48:42,427 INFO [RS_CLOSE_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.80 KB at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/meta/1588230740/.tmp/info/0040d3faead1408eaab3ecfdd25c5409 2024-11-08T00:48:42,430 INFO [regionserver/c302dddf724e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T00:48:42,444 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/quota/2f57c4e8f93fd2814d7a8ce1d4ced5a1/.tmp/u/6238e6af8b6c47e1a63098317fc49c21 is 43, key is t.TestNs:TestTable/u:/1731026917837/DeleteFamily/seqid=0 2024-11-08T00:48:42,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33807 is added to blk_1073741850_1026 (size=5250) 2024-11-08T00:48:42,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44001 is added to blk_1073741850_1026 (size=5250) 2024-11-08T00:48:42,451 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=129 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/quota/2f57c4e8f93fd2814d7a8ce1d4ced5a1/.tmp/u/6238e6af8b6c47e1a63098317fc49c21 2024-11-08T00:48:42,452 DEBUG [RS_CLOSE_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/meta/1588230740/.tmp/ns/5d8f9b271a0a4e519a3e25a94bbff00b is 92, key is TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212./ns:/1731026921843/DeleteFamily/seqid=0 2024-11-08T00:48:42,460 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 6238e6af8b6c47e1a63098317fc49c21 2024-11-08T00:48:42,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33807 is added to blk_1073741851_1027 (size=5710) 2024-11-08T00:48:42,462 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/quota/2f57c4e8f93fd2814d7a8ce1d4ced5a1/.tmp/q/8be6cb50efe3408bb01a60d0874ccba9 as hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/quota/2f57c4e8f93fd2814d7a8ce1d4ced5a1/q/8be6cb50efe3408bb01a60d0874ccba9 2024-11-08T00:48:42,462 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44001 is added to blk_1073741851_1027 (size=5710) 2024-11-08T00:48:42,462 INFO [RS_CLOSE_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=572 B at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/meta/1588230740/.tmp/ns/5d8f9b271a0a4e519a3e25a94bbff00b 2024-11-08T00:48:42,471 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8be6cb50efe3408bb01a60d0874ccba9 2024-11-08T00:48:42,471 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/quota/2f57c4e8f93fd2814d7a8ce1d4ced5a1/q/8be6cb50efe3408bb01a60d0874ccba9, entries=5, sequenceid=17, filesize=5.2 K 2024-11-08T00:48:42,473 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/quota/2f57c4e8f93fd2814d7a8ce1d4ced5a1/.tmp/u/6238e6af8b6c47e1a63098317fc49c21 as hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/quota/2f57c4e8f93fd2814d7a8ce1d4ced5a1/u/6238e6af8b6c47e1a63098317fc49c21 2024-11-08T00:48:42,480 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 6238e6af8b6c47e1a63098317fc49c21 2024-11-08T00:48:42,480 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/quota/2f57c4e8f93fd2814d7a8ce1d4ced5a1/u/6238e6af8b6c47e1a63098317fc49c21, entries=4, sequenceid=17, filesize=5.1 K 2024-11-08T00:48:42,481 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~726 B/726, heapSize ~2.41 KB/2464, currentSize=0 B/0 for 2f57c4e8f93fd2814d7a8ce1d4ced5a1 in 111ms, sequenceid=17, compaction requested=false 2024-11-08T00:48:42,486 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/quota/2f57c4e8f93fd2814d7a8ce1d4ced5a1/recovered.edits/20.seqid, newMaxSeqId=20, maxSeqId=1 2024-11-08T00:48:42,487 INFO [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:quota,,1731026908923.2f57c4e8f93fd2814d7a8ce1d4ced5a1. 
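
[Editor's note] The "Committing .../.tmp/q/... as .../q/..." and ".tmp/u/..." entries above show the flush-commit pattern used when the hbase:quota region is closed: each flushed store file is first written under the region's .tmp directory and only becomes visible once it is moved into the column-family directory. The fragment below is a conceptual sketch of that final move using the plain Hadoop FileSystem API, with the region path and file name copied from the log; it illustrates the pattern and is not HBase's HRegionFileSystem implementation.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FlushCommitSketch {
  // Move a flushed file from the region's .tmp area into its column-family
  // directory, mirroring the "Committing ... as ..." entries above.
  static void commit(FileSystem fs, Path regionDir, String family, String hfileName)
      throws IOException {
    Path tmp = new Path(new Path(regionDir, ".tmp/" + family), hfileName);
    Path dst = new Path(new Path(regionDir, family), hfileName);
    if (!fs.rename(tmp, dst)) {  // a single rename, so readers never observe a partial file
      throw new IOException("Failed to commit " + tmp + " as " + dst);
    }
  }

  public static void main(String[] args) throws IOException {
    // Region directory taken from the log; the target filesystem is whatever
    // fs.defaultFS points at (an HDFS mini-cluster in the run above).
    Configuration conf = new Configuration();
    Path regionDir = new Path(
        "/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/quota/2f57c4e8f93fd2814d7a8ce1d4ced5a1");
    commit(FileSystem.get(conf), regionDir, "u", "6238e6af8b6c47e1a63098317fc49c21");
  }
}
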
2024-11-08T00:48:42,487 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 2f57c4e8f93fd2814d7a8ce1d4ced5a1: Waiting for close lock at 1731026922369Running coprocessor pre-close hooks at 1731026922369Disabling compacts and flushes for region at 1731026922369Disabling writes for close at 1731026922369Obtaining lock to block concurrent updates at 1731026922370 (+1 ms)Preparing flush snapshotting stores in 2f57c4e8f93fd2814d7a8ce1d4ced5a1 at 1731026922370Finished memstore snapshotting hbase:quota,,1731026908923.2f57c4e8f93fd2814d7a8ce1d4ced5a1., syncing WAL and waiting on mvcc, flushsize=dataSize=726, getHeapSize=2464, getOffHeapSize=0, getCellsCount=17 at 1731026922370Flushing stores of hbase:quota,,1731026908923.2f57c4e8f93fd2814d7a8ce1d4ced5a1. at 1731026922371 (+1 ms)Flushing 2f57c4e8f93fd2814d7a8ce1d4ced5a1/q: creating writer at 1731026922371Flushing 2f57c4e8f93fd2814d7a8ce1d4ced5a1/q: appending metadata at 1731026922396 (+25 ms)Flushing 2f57c4e8f93fd2814d7a8ce1d4ced5a1/q: closing flushed file at 1731026922396Flushing 2f57c4e8f93fd2814d7a8ce1d4ced5a1/u: creating writer at 1731026922425 (+29 ms)Flushing 2f57c4e8f93fd2814d7a8ce1d4ced5a1/u: appending metadata at 1731026922442 (+17 ms)Flushing 2f57c4e8f93fd2814d7a8ce1d4ced5a1/u: closing flushed file at 1731026922442Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@415224c4: reopening flushed file at 1731026922460 (+18 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f699c32: reopening flushed file at 1731026922472 (+12 ms)Finished flush of dataSize ~726 B/726, heapSize ~2.41 KB/2464, currentSize=0 B/0 for 2f57c4e8f93fd2814d7a8ce1d4ced5a1 in 111ms, sequenceid=17, compaction requested=false at 1731026922481 (+9 ms)Writing region close event to WAL at 1731026922482 (+1 ms)Running coprocessor post-close hooks at 1731026922487 (+5 ms)Closed at 1731026922487 2024-11-08T00:48:42,487 DEBUG [RS_CLOSE_REGION-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:quota,,1731026908923.2f57c4e8f93fd2814d7a8ce1d4ced5a1. 2024-11-08T00:48:42,487 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36935-0x100f50c52a90002, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T00:48:42,487 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36935-0x100f50c52a90002, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T00:48:42,488 INFO [RS:1;c302dddf724e:36935 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T00:48:42,488 INFO [RS:1;c302dddf724e:36935 {}] regionserver.HRegionServer(1031): Exiting; stopping=c302dddf724e,36935,1731026906786; zookeeper connection closed. 
2024-11-08T00:48:42,488 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2ed825c7 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2ed825c7 2024-11-08T00:48:42,490 DEBUG [RS_CLOSE_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/meta/1588230740/.tmp/rep_barrier/a9a7bd06a9aa4bb0bf7ea1421eff5cc6 is 101, key is TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212./rep_barrier:/1731026921843/DeleteFamily/seqid=0 2024-11-08T00:48:42,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44001 is added to blk_1073741852_1028 (size=5823) 2024-11-08T00:48:42,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33807 is added to blk_1073741852_1028 (size=5823) 2024-11-08T00:48:42,497 INFO [RS_CLOSE_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=515 B at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/meta/1588230740/.tmp/rep_barrier/a9a7bd06a9aa4bb0bf7ea1421eff5cc6 2024-11-08T00:48:42,521 DEBUG [RS_CLOSE_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/meta/1588230740/.tmp/table/86b120a6c8374fcd96e9465b50524d35 is 95, key is TestNs:TestTable,1,1731026912199.822a2b407be6bb38301bc44c8cd44212./table:/1731026921843/DeleteFamily/seqid=0 2024-11-08T00:48:42,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33807 is added to blk_1073741853_1029 (size=5966) 2024-11-08T00:48:42,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44001 is added to blk_1073741853_1029 (size=5966) 2024-11-08T00:48:42,529 INFO [RS_CLOSE_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.47 KB at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/meta/1588230740/.tmp/table/86b120a6c8374fcd96e9465b50524d35 2024-11-08T00:48:42,537 DEBUG [RS_CLOSE_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/meta/1588230740/.tmp/info/0040d3faead1408eaab3ecfdd25c5409 as hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/meta/1588230740/info/0040d3faead1408eaab3ecfdd25c5409 2024-11-08T00:48:42,546 INFO [RS_CLOSE_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/meta/1588230740/info/0040d3faead1408eaab3ecfdd25c5409, entries=21, sequenceid=65, filesize=7.2 K 2024-11-08T00:48:42,547 DEBUG [RS_CLOSE_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/meta/1588230740/.tmp/ns/5d8f9b271a0a4e519a3e25a94bbff00b as hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/meta/1588230740/ns/5d8f9b271a0a4e519a3e25a94bbff00b 2024-11-08T00:48:42,554 INFO [RS_CLOSE_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/meta/1588230740/ns/5d8f9b271a0a4e519a3e25a94bbff00b, entries=8, sequenceid=65, filesize=5.6 K 2024-11-08T00:48:42,555 DEBUG [RS_CLOSE_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/meta/1588230740/.tmp/rep_barrier/a9a7bd06a9aa4bb0bf7ea1421eff5cc6 as hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/meta/1588230740/rep_barrier/a9a7bd06a9aa4bb0bf7ea1421eff5cc6 2024-11-08T00:48:42,563 INFO [RS_CLOSE_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/meta/1588230740/rep_barrier/a9a7bd06a9aa4bb0bf7ea1421eff5cc6, entries=6, sequenceid=65, filesize=5.7 K 2024-11-08T00:48:42,565 DEBUG [RS_CLOSE_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/meta/1588230740/.tmp/table/86b120a6c8374fcd96e9465b50524d35 as hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/meta/1588230740/table/86b120a6c8374fcd96e9465b50524d35 2024-11-08T00:48:42,570 DEBUG [RS:0;c302dddf724e:44401 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-08T00:48:42,573 INFO [RS_CLOSE_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/meta/1588230740/table/86b120a6c8374fcd96e9465b50524d35, entries=12, sequenceid=65, filesize=5.8 K 2024-11-08T00:48:42,575 INFO [RS_CLOSE_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~13.33 KB/13653, heapSize ~24.48 KB/25072, currentSize=0 B/0 for 1588230740 in 204ms, sequenceid=65, compaction requested=false 2024-11-08T00:48:42,581 DEBUG [RS_CLOSE_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/data/hbase/meta/1588230740/recovered.edits/68.seqid, newMaxSeqId=68, maxSeqId=1 2024-11-08T00:48:42,581 DEBUG [RS_CLOSE_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-08T00:48:42,582 INFO [RS_CLOSE_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-08T00:48:42,582 DEBUG [RS_CLOSE_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 
1731026922370Running coprocessor pre-close hooks at 1731026922370Disabling compacts and flushes for region at 1731026922370Disabling writes for close at 1731026922370Obtaining lock to block concurrent updates at 1731026922371 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731026922371Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=13653, getHeapSize=25072, getOffHeapSize=0, getCellsCount=139 at 1731026922372 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731026922372Flushing 1588230740/info: creating writer at 1731026922372Flushing 1588230740/info: appending metadata at 1731026922399 (+27 ms)Flushing 1588230740/info: closing flushed file at 1731026922399Flushing 1588230740/ns: creating writer at 1731026922435 (+36 ms)Flushing 1588230740/ns: appending metadata at 1731026922451 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1731026922451Flushing 1588230740/rep_barrier: creating writer at 1731026922469 (+18 ms)Flushing 1588230740/rep_barrier: appending metadata at 1731026922490 (+21 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1731026922490Flushing 1588230740/table: creating writer at 1731026922504 (+14 ms)Flushing 1588230740/table: appending metadata at 1731026922521 (+17 ms)Flushing 1588230740/table: closing flushed file at 1731026922521Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3f449940: reopening flushed file at 1731026922536 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2ca04a51: reopening flushed file at 1731026922546 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1a151a4b: reopening flushed file at 1731026922555 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@61e77779: reopening flushed file at 1731026922564 (+9 ms)Finished flush of dataSize ~13.33 KB/13653, heapSize ~24.48 KB/25072, currentSize=0 B/0 for 1588230740 in 204ms, sequenceid=65, compaction requested=false at 1731026922575 (+11 ms)Writing region close event to WAL at 1731026922576 (+1 ms)Running coprocessor post-close hooks at 1731026922581 (+5 ms)Closed at 1731026922581 2024-11-08T00:48:42,582 DEBUG [RS_CLOSE_META-regionserver/c302dddf724e:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-08T00:48:42,771 INFO [RS:0;c302dddf724e:44401 {}] regionserver.HRegionServer(976): stopping server c302dddf724e,44401,1731026906683; all regions closed. 2024-11-08T00:48:42,771 DEBUG [RS:0;c302dddf724e:44401 {}] quotas.QuotaCache(112): Stopping QuotaRefresherChore chore. 
2024-11-08T00:48:42,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33807 is added to blk_1073741835_1011 (size=17505) 2024-11-08T00:48:42,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44001 is added to blk_1073741835_1011 (size=17505) 2024-11-08T00:48:42,778 DEBUG [RS:0;c302dddf724e:44401 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/oldWALs 2024-11-08T00:48:42,778 INFO [RS:0;c302dddf724e:44401 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL c302dddf724e%2C44401%2C1731026906683.meta:.meta(num 1731026908668) 2024-11-08T00:48:42,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33807 is added to blk_1073741833_1009 (size=4494) 2024-11-08T00:48:42,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44001 is added to blk_1073741833_1009 (size=4494) 2024-11-08T00:48:42,784 DEBUG [RS:0;c302dddf724e:44401 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/oldWALs 2024-11-08T00:48:42,784 INFO [RS:0;c302dddf724e:44401 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL c302dddf724e%2C44401%2C1731026906683:(num 1731026908072) 2024-11-08T00:48:42,784 DEBUG [RS:0;c302dddf724e:44401 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:48:42,784 INFO [RS:0;c302dddf724e:44401 {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T00:48:42,784 INFO [RS:0;c302dddf724e:44401 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T00:48:42,785 INFO [RS:0;c302dddf724e:44401 {}] hbase.ChoreService(370): Chore service for: regionserver/c302dddf724e:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-08T00:48:42,785 INFO [RS:0;c302dddf724e:44401 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T00:48:42,785 INFO [regionserver/c302dddf724e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-08T00:48:42,785 INFO [RS:0;c302dddf724e:44401 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44401 2024-11-08T00:48:42,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44401-0x100f50c52a90001, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c302dddf724e,44401,1731026906683 2024-11-08T00:48:42,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46149-0x100f50c52a90000, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T00:48:42,787 INFO [RS:0;c302dddf724e:44401 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T00:48:42,788 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c302dddf724e,44401,1731026906683] 2024-11-08T00:48:42,791 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c302dddf724e,44401,1731026906683 already deleted, retry=false 2024-11-08T00:48:42,791 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c302dddf724e,44401,1731026906683 expired; onlineServers=0 2024-11-08T00:48:42,791 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c302dddf724e,46149,1731026905945' ***** 2024-11-08T00:48:42,791 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-08T00:48:42,791 INFO [M:0;c302dddf724e:46149 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T00:48:42,791 INFO [M:0;c302dddf724e:46149 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T00:48:42,791 DEBUG [M:0;c302dddf724e:46149 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-08T00:48:42,792 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-08T00:48:42,792 DEBUG [M:0;c302dddf724e:46149 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-08T00:48:42,792 DEBUG [master/c302dddf724e:0:becomeActiveMaster-HFileCleaner.small.0-1731026907793 {}] cleaner.HFileCleaner(306): Exit Thread[master/c302dddf724e:0:becomeActiveMaster-HFileCleaner.small.0-1731026907793,5,FailOnTimeoutGroup] 2024-11-08T00:48:42,792 DEBUG [master/c302dddf724e:0:becomeActiveMaster-HFileCleaner.large.0-1731026907791 {}] cleaner.HFileCleaner(306): Exit Thread[master/c302dddf724e:0:becomeActiveMaster-HFileCleaner.large.0-1731026907791,5,FailOnTimeoutGroup] 2024-11-08T00:48:42,792 INFO [M:0;c302dddf724e:46149 {}] hbase.ChoreService(370): Chore service for: master/c302dddf724e:0 had [ScheduledChore name=QuotaObserverChore, period=60000, unit=MILLISECONDS, ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-08T00:48:42,792 INFO [M:0;c302dddf724e:46149 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T00:48:42,792 DEBUG [M:0;c302dddf724e:46149 {}] master.HMaster(1795): Stopping service threads 2024-11-08T00:48:42,792 INFO [M:0;c302dddf724e:46149 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-08T00:48:42,792 INFO [M:0;c302dddf724e:46149 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-08T00:48:42,793 ERROR [M:0;c302dddf724e:46149 {}] procedure2.ProcedureExecutor(763): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[HFileArchiver-1,5,PEWorkerGroup] Thread[HFileArchiver-2,5,PEWorkerGroup] Thread[HFileArchiver-3,5,PEWorkerGroup] Thread[HFileArchiver-4,5,PEWorkerGroup] Thread[HFileArchiver-5,5,PEWorkerGroup] 2024-11-08T00:48:42,793 INFO [M:0;c302dddf724e:46149 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-08T00:48:42,793 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-08T00:48:42,794 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46149-0x100f50c52a90000, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-08T00:48:42,794 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46149-0x100f50c52a90000, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:48:42,794 DEBUG [M:0;c302dddf724e:46149 {}] zookeeper.ZKUtil(347): master:46149-0x100f50c52a90000, quorum=127.0.0.1:63994, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-08T00:48:42,794 WARN [M:0;c302dddf724e:46149 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-08T00:48:42,794 INFO [M:0;c302dddf724e:46149 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/.lastflushedseqids 2024-11-08T00:48:42,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33807 is added to blk_1073741854_1030 (size=134) 2024-11-08T00:48:42,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44001 is added to blk_1073741854_1030 (size=134) 2024-11-08T00:48:42,809 INFO [M:0;c302dddf724e:46149 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-08T00:48:42,809 INFO [M:0;c302dddf724e:46149 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-08T00:48:42,809 DEBUG [M:0;c302dddf724e:46149 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-08T00:48:42,809 INFO [M:0;c302dddf724e:46149 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:48:42,809 DEBUG [M:0;c302dddf724e:46149 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:48:42,809 DEBUG [M:0;c302dddf724e:46149 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-08T00:48:42,809 DEBUG [M:0;c302dddf724e:46149 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-08T00:48:42,809 INFO [M:0;c302dddf724e:46149 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=157.72 KB heapSize=190.77 KB 2024-11-08T00:48:42,827 DEBUG [M:0;c302dddf724e:46149 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d53187f292d244e1ac92dca77a4b59f0 is 82, key is hbase:meta,,1/info:regioninfo/1731026908748/Put/seqid=0 2024-11-08T00:48:42,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44001 is added to blk_1073741855_1031 (size=5672) 2024-11-08T00:48:42,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33807 is added to blk_1073741855_1031 (size=5672) 2024-11-08T00:48:42,834 INFO [M:0;c302dddf724e:46149 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d53187f292d244e1ac92dca77a4b59f0 2024-11-08T00:48:42,860 DEBUG [M:0;c302dddf724e:46149 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/19228bb1bf47499e8b0bcaf5d868eb43 is 958, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731026909411/Put/seqid=0 2024-11-08T00:48:42,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33807 is added to blk_1073741856_1032 (size=13432) 2024-11-08T00:48:42,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44001 is added to blk_1073741856_1032 (size=13432) 2024-11-08T00:48:42,867 INFO [M:0;c302dddf724e:46149 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=157.10 KB at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/19228bb1bf47499e8b0bcaf5d868eb43 2024-11-08T00:48:42,889 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44401-0x100f50c52a90001, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T00:48:42,889 DEBUG [M:0;c302dddf724e:46149 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9466b970871248e39cc233007124b499 is 69, key is c302dddf724e,36935,1731026906786/rs:state/1731026907821/Put/seqid=0 2024-11-08T00:48:42,890 INFO [RS:0;c302dddf724e:44401 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T00:48:42,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44401-0x100f50c52a90001, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T00:48:42,890 INFO [RS:0;c302dddf724e:44401 {}] regionserver.HRegionServer(1031): Exiting; stopping=c302dddf724e,44401,1731026906683; zookeeper connection closed. 
2024-11-08T00:48:42,890 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@626db717 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@626db717
2024-11-08T00:48:42,890 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete
2024-11-08T00:48:42,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33807 is added to blk_1073741857_1033 (size=5224)
2024-11-08T00:48:42,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44001 is added to blk_1073741857_1033 (size=5224)
2024-11-08T00:48:43,298 INFO [M:0;c302dddf724e:46149 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9466b970871248e39cc233007124b499
2024-11-08T00:48:43,306 DEBUG [M:0;c302dddf724e:46149 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d53187f292d244e1ac92dca77a4b59f0 as hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d53187f292d244e1ac92dca77a4b59f0
2024-11-08T00:48:43,315 INFO [M:0;c302dddf724e:46149 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d53187f292d244e1ac92dca77a4b59f0, entries=8, sequenceid=375, filesize=5.5 K
2024-11-08T00:48:43,316 DEBUG [M:0;c302dddf724e:46149 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/19228bb1bf47499e8b0bcaf5d868eb43 as hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/19228bb1bf47499e8b0bcaf5d868eb43
2024-11-08T00:48:43,324 INFO [M:0;c302dddf724e:46149 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/19228bb1bf47499e8b0bcaf5d868eb43, entries=44, sequenceid=375, filesize=13.1 K
2024-11-08T00:48:43,326 DEBUG [M:0;c302dddf724e:46149 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9466b970871248e39cc233007124b499 as hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9466b970871248e39cc233007124b499
2024-11-08T00:48:43,335 INFO [M:0;c302dddf724e:46149 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36609/user/jenkins/test-data/d71cf96c-34f6-1b67-f4c7-12be7f9b0cc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9466b970871248e39cc233007124b499, entries=2, sequenceid=375, filesize=5.1 K
2024-11-08T00:48:43,336 INFO [M:0;c302dddf724e:46149 {}] regionserver.HRegion(3140): Finished flush of dataSize ~157.72 KB/161504, heapSize ~190.47 KB/195040, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 527ms, sequenceid=375, compaction requested=false
2024-11-08T00:48:43,338 INFO [M:0;c302dddf724e:46149 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-08T00:48:43,338 DEBUG [M:0;c302dddf724e:46149 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
	Waiting for close lock at 1731026922809
	Disabling compacts and flushes for region at 1731026922809
	Disabling writes for close at 1731026922809
	Obtaining lock to block concurrent updates at 1731026922809
	Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731026922809
	Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=161504, getHeapSize=195280, getOffHeapSize=0, getCellsCount=434 at 1731026922810 (+1 ms)
	Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731026922810
	Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731026922810
	Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731026922827 (+17 ms)
	Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731026922827
	Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731026922841 (+14 ms)
	Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731026922859 (+18 ms)
	Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731026922859
	Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731026922874 (+15 ms)
	Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731026922889 (+15 ms)
	Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731026922889
	Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1fc2eab: reopening flushed file at 1731026923305 (+416 ms)
	Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@9a47443: reopening flushed file at 1731026923315 (+10 ms)
	Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@689ca6ae: reopening flushed file at 1731026923325 (+10 ms)
	Finished flush of dataSize ~157.72 KB/161504, heapSize ~190.47 KB/195040, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 527ms, sequenceid=375, compaction requested=false at 1731026923336 (+11 ms)
	Writing region close event to WAL at 1731026923338 (+2 ms)
	Closed at 1731026923338
2024-11-08T00:48:43,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33807 is added to blk_1073741830_1006 (size=186534)
2024-11-08T00:48:43,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44001 is added to blk_1073741830_1006 (size=186534)
2024-11-08T00:48:43,344 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-08T00:48:43,344 INFO [M:0;c302dddf724e:46149 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-08T00:48:43,344 INFO [M:0;c302dddf724e:46149 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46149
2024-11-08T00:48:43,344 INFO [M:0;c302dddf724e:46149 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-08T00:48:43,447 INFO [M:0;c302dddf724e:46149 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-08T00:48:43,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46149-0x100f50c52a90000, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-08T00:48:43,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46149-0x100f50c52a90000, quorum=127.0.0.1:63994, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-08T00:48:43,453 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@47a5f093{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-08T00:48:43,456 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@17ecf0d5{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-08T00:48:43,456 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-08T00:48:43,457 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@d84e403{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-08T00:48:43,457 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@15f32f09{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/hadoop.log.dir/,STOPPED}
2024-11-08T00:48:43,460 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-08T00:48:43,460 WARN [BP-1124388816-172.17.0.2-1731026902973 heartbeating to localhost/127.0.0.1:36609 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-08T00:48:43,460 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-08T00:48:43,460 WARN [BP-1124388816-172.17.0.2-1731026902973 heartbeating to localhost/127.0.0.1:36609 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1124388816-172.17.0.2-1731026902973 (Datanode Uuid 94c00e7b-aaee-46ce-8dc1-e851c27a3c97) service to localhost/127.0.0.1:36609
2024-11-08T00:48:43,462 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/cluster_12317072-aa6d-428e-8711-1fc6d7e41e59/data/data3/current/BP-1124388816-172.17.0.2-1731026902973 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-08T00:48:43,462 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/cluster_12317072-aa6d-428e-8711-1fc6d7e41e59/data/data4/current/BP-1124388816-172.17.0.2-1731026902973 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-08T00:48:43,463 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-08T00:48:43,465 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@286f0a0e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-08T00:48:43,465 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7cda2dcd{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-08T00:48:43,465 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-08T00:48:43,466 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f308f72{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-08T00:48:43,466 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7826ba03{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/hadoop.log.dir/,STOPPED}
2024-11-08T00:48:43,468 WARN [BP-1124388816-172.17.0.2-1731026902973 heartbeating to localhost/127.0.0.1:36609 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-08T00:48:43,468 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-08T00:48:43,468 WARN [BP-1124388816-172.17.0.2-1731026902973 heartbeating to localhost/127.0.0.1:36609 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1124388816-172.17.0.2-1731026902973 (Datanode Uuid 52ce8828-7352-4067-b8d2-b05ef156c26e) service to localhost/127.0.0.1:36609
2024-11-08T00:48:43,468 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-08T00:48:43,468 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/cluster_12317072-aa6d-428e-8711-1fc6d7e41e59/data/data1/current/BP-1124388816-172.17.0.2-1731026902973 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-08T00:48:43,468 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/cluster_12317072-aa6d-428e-8711-1fc6d7e41e59/data/data2/current/BP-1124388816-172.17.0.2-1731026902973 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-08T00:48:43,469 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-08T00:48:43,479 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@300f56e1{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-08T00:48:43,480 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@51edec43{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-08T00:48:43,480 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-08T00:48:43,480 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3b1a9f49{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-08T00:48:43,481 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4d649244{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e0e7684-80bb-c308-9605-91cd775752be/hadoop.log.dir/,STOPPED}
2024-11-08T00:48:43,489 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-08T00:48:43,519 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down